diff --git a/go.mod b/go.mod index 28d647328..ebe6f7f07 100644 --- a/go.mod +++ b/go.mod @@ -27,8 +27,8 @@ require ( kmodules.xyz/client-go v0.34.2 kmodules.xyz/custom-resources v0.34.0 kmodules.xyz/monitoring-agent-api v0.34.0 - kubedb.dev/apimachinery v0.60.0-rc.0.0.20251227140622-3fb97b2591c2 - kubedb.dev/db-client-go v0.15.0-rc.0 + kubedb.dev/apimachinery v0.60.0-rc.1 + kubedb.dev/db-client-go v0.15.0-rc.1 kubeops.dev/petset v0.0.15 sigs.k8s.io/controller-runtime v0.22.4 sigs.k8s.io/yaml v1.6.0 @@ -99,7 +99,7 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -121,7 +121,7 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/client v0.87.1 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/procfs v0.17.0 // indirect - github.com/redis/go-redis/v9 v9.6.3 // indirect + github.com/redis/go-redis/v9 v9.17.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/spf13/pflag v1.0.10 // indirect @@ -130,14 +130,14 @@ require ( github.com/yudai/gojsondiff v1.0.0 // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect + go.uber.org/zap v1.27.1 // indirect go.virtual-secrets.dev/apimachinery v0.0.1 // indirect - golang.org/x/net v0.47.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.33.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect diff --git a/go.sum b/go.sum index 84d7a34b8..75c33b907 100644 --- a/go.sum +++ b/go.sum @@ -257,10 +257,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= -github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod 
h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc h1:R5bKc1c8Qu7z+7+O0xNWxIPjCYuaHUVZ+dSfeCZEd+c= github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w= github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd h1:cpLV7Pr+pSo3kDYY4HsLZfbdF1WPQuPTP+Jo3hyoWzw= @@ -371,12 +371,12 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0= -github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI= +github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -442,16 +442,16 @@ github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaD go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod 
h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -459,8 +459,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.virtual-secrets.dev/apimachinery v0.0.1 h1:Xc8l+5yaH2bAHmqk5bkXTOFkOYjCZnhPoaVxtLJZRdM= go.virtual-secrets.dev/apimachinery v0.0.1/go.mod h1:DAilea9CBrKoDRbxoZ27jAW/df45KZy1iPDGTClKSTs= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= @@ -522,8 +522,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -613,8 +613,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -744,10 +744,10 @@ kmodules.xyz/prober v0.34.0 h1:ElZkZYCjLaytAA0M8EH42To7i9gh1IIX+d0qfaIohys= kmodules.xyz/prober v0.34.0/go.mod h1:rsu/fxxfNxY70GDbH6Ju8G66459hi7AhWSSBoiIp8ic= kmodules.xyz/resource-metadata v0.40.2 h1:2J+UvAaHXfqDStO2SKqeVkER4z/kSOqpj8Iyrc9+V4Y= kmodules.xyz/resource-metadata v0.40.2/go.mod h1:38+41aUSrWqrQDeaSITKoxAiGT0ysQk5yjRODqBadpw= -kubedb.dev/apimachinery v0.60.0-rc.0.0.20251227140622-3fb97b2591c2 h1:Mlf68kveAm22ErXP+7z9axuiirrLzg8/MKsV4rBNpHo= -kubedb.dev/apimachinery v0.60.0-rc.0.0.20251227140622-3fb97b2591c2/go.mod h1:C//93Ze70E3a56BAPTjDSXqTYK1LbTw6GXC0JngWTjI= -kubedb.dev/db-client-go v0.15.0-rc.0 h1:2C/ua2FnDsvAcYLSdU5dLPFWEjIkioUCBfAR85TID2c= -kubedb.dev/db-client-go v0.15.0-rc.0/go.mod h1:d/tBK5AeezNKeEXCoBJxbyWpxOzr3b9gxPdG3huADoE= +kubedb.dev/apimachinery v0.60.0-rc.1 h1:4e5bDwLjf4VravncyleYrmamtHP0+7EihXW/Z9tYzv4= +kubedb.dev/apimachinery v0.60.0-rc.1/go.mod h1:C//93Ze70E3a56BAPTjDSXqTYK1LbTw6GXC0JngWTjI= +kubedb.dev/db-client-go v0.15.0-rc.1 h1:0UG62PF9yHpjuL2i/v4Wc1+SaR8Ptk0OBMtoj26fEJU= +kubedb.dev/db-client-go v0.15.0-rc.1/go.mod h1:XN/kbqduVcvWIyHvvhk64doVGHB/tQ/ipEiB7SSS4G8= kubeops.dev/csi-driver-cacerts v0.4.0 h1:aGZjDVk9Rv5a0EqDQ/atvg9yVhN7xEU5VKaNI2m1g74= kubeops.dev/csi-driver-cacerts v0.4.0/go.mod h1:v1595ZuAd8PGs6egLt7dj6EVWTifdLqRKzUGEqp6eI0= kubeops.dev/petset v0.0.15 h1:iwTRFAp0RNw0A87sw2c97UZ6WIA9H/nhJBpDhXLa7fk= diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml index 944cc0007..1b695b62c 100644 --- a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -1,5 +1,4 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com
+version: 2

builds:
  -
@@ -27,16 +26,7 @@ builds:

archives:
  -
    id: cpuid
-    name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "cpuid-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    format_overrides:
      - goos: windows
        format: zip
@@ -44,8 +34,6 @@ archives:
    - LICENSE
checksum:
  name_template: 'checksums.txt'
-snapshot:
-  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
@@ -58,7 +46,7 @@ changelog:

nfpms:
  -
-    file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "cpuid_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    vendor: Klaus Post
    homepage: https://github.com/klauspost/cpuid
    maintainer: Klaus Post
@@ -67,8 +55,3 @@ nfpms:
    formats:
      - deb
      - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index f06ba51c5..e59d3d0c0 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -282,7 +282,9 @@ Exit Code 1
 | AMXINT8 | Tile computational operations on 8-bit integers |
 | AMXFP16 | Tile computational operations on FP16 numbers |
 | AMXFP8 | Tile computational operations on FP8 numbers |
+| AMXCOMPLEX | Tile computational operations on complex numbers |
 | AMXTILE | Tile architecture |
+| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile |
 | APX_F | Intel APX |
 | AVX | AVX functions |
 | AVX10 | If set the Intel AVX10 Converged Vector ISA is supported |
@@ -480,12 +482,16 @@ Exit Code 1
 | DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
 | EVTSTRM | Generic timer |
 | FCMA | Floatin point complex number addition and multiplication |
+| FHM | FMLAL and FMLSL instructions |
 | FP | Single-precision and double-precision floating point |
 | FPHP | Half-precision floating point |
 | GPA | Generic Pointer Authentication |
 | JSCVT | Javascript-style double->int convert (FJCVTZS) |
 | LRCPC | Weaker release consistency (LDAPR, etc) |
 | PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
+| RNDR | Random Number instructions |
+| TLB | Outer Shareable and TLB range maintenance instructions |
+| TS | Flag manipulation instructions |
 | SHA1 | SHA-1 instructions (SHA1C, etc) |
 | SHA2 | SHA-2 instructions (SHA256H, etc) |
 | SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) |
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index db99eb62f..8103fb343 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -83,6 +83,8 @@ const (
 	AMXINT8 // Tile computational operations on 8-bit integers
 	AMXFP8 // Tile computational operations on FP8 numbers
 	AMXTILE // Tile architecture
+	AMXTF32 // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile
+	AMXCOMPLEX // Tile computational operations on complex numbers
 	APX_F // Intel APX
 	AVX // AVX functions
 	AVX10 // If set the Intel AVX10 Converged Vector ISA is supported
@@ -282,12 +284,16 @@ const (
 	DCPOP // Data cache clean to Point of Persistence (DC CVAP)
 	EVTSTRM // Generic timer
 	FCMA // Floatin point complex number addition and multiplication
+	FHM // FMLAL and FMLSL instructions
 	FP // Single-precision and double-precision floating point
 	FPHP 
// Half-precision floating point GPA // Generic Pointer Authentication JSCVT // Javascript-style double->int convert (FJCVTZS) LRCPC // Weaker release consistency (LDAPR, etc) PMULL // Polynomial Multiply instructions (PMULL/PMULL2) + RNDR // Random Number instructions + TLB // Outer Shareable and TLB range maintenance instructions + TS // Flag manipulation instructions SHA1 // SHA-1 instructions (SHA1C, etc) SHA2 // SHA-2 instructions (SHA256H, etc) SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) @@ -532,7 +538,7 @@ func (c CPUInfo) Ia32TscAux() uint32 { return ecx } -// SveLengths returns arm SVE vector and predicate lengths. +// SveLengths returns arm SVE vector and predicate lengths in bits. // Will return 0, 0 if SVE is not enabled or otherwise unable to detect. func (c CPUInfo) SveLengths() (vl, pl uint64) { if !c.Has(SVE) { @@ -1284,6 +1290,8 @@ func support() flagSet { // CPUID.(EAX=7, ECX=1).EDX fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<7) != 0, AMXTF32) + fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX) fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) fs.setIf(edx1&(1<<14) != 0, PREFETCHI) fs.setIf(edx1&(1<<19) != 0, AVX10) diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go index 566743d22..9ae32d607 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -157,6 +157,10 @@ func addInfo(c *CPUInfo, safe bool) { // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| + // | RNDR | [63-60] | y | + // |--------------------------------------------------| + // | TLB | [59-56] | y | + // |--------------------------------------------------| // | TS | [55-52] | y | // |--------------------------------------------------| // | FHM | [51-48] | y | @@ -182,12 +186,10 @@ func addInfo(c *CPUInfo, safe bool) { // | AES | [7-4] | y | // x--------------------------------------------------x - // if instAttrReg0&(0xf<<52) != 0 { - // fmt.Println("TS") - // } - // if instAttrReg0&(0xf<<48) != 0 { - // fmt.Println("FHM") - // } + f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR) + f.setIf(instAttrReg0&(0xf<<56) != 0, TLB) + f.setIf(instAttrReg0&(0xf<<52) != 0, TS) + f.setIf(instAttrReg0&(0xf<<48) != 0, FHM) f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index e7f874a7e..04760c1af 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -17,223 +17,229 @@ func _() { _ = x[AMXINT8-7] _ = x[AMXFP8-8] _ = x[AMXTILE-9] - _ = x[APX_F-10] - _ = x[AVX-11] - _ = x[AVX10-12] - _ = x[AVX10_128-13] - _ = x[AVX10_256-14] - _ = x[AVX10_512-15] - _ = x[AVX2-16] - _ = x[AVX512BF16-17] - _ = x[AVX512BITALG-18] - _ = x[AVX512BW-19] - _ = x[AVX512CD-20] - _ = x[AVX512DQ-21] - _ = x[AVX512ER-22] - _ = x[AVX512F-23] - _ = x[AVX512FP16-24] - _ = x[AVX512IFMA-25] - _ = x[AVX512PF-26] - _ = x[AVX512VBMI-27] - _ = x[AVX512VBMI2-28] - _ = x[AVX512VL-29] - _ = x[AVX512VNNI-30] - _ = x[AVX512VP2INTERSECT-31] - _ = x[AVX512VPOPCNTDQ-32] - _ = x[AVXIFMA-33] - _ = x[AVXNECONVERT-34] - _ = x[AVXSLOW-35] - _ = x[AVXVNNI-36] - _ = x[AVXVNNIINT8-37] - _ = x[AVXVNNIINT16-38] - _ = 
x[BHI_CTRL-39] - _ = x[BMI1-40] - _ = x[BMI2-41] - _ = x[CETIBT-42] - _ = x[CETSS-43] - _ = x[CLDEMOTE-44] - _ = x[CLMUL-45] - _ = x[CLZERO-46] - _ = x[CMOV-47] - _ = x[CMPCCXADD-48] - _ = x[CMPSB_SCADBS_SHORT-49] - _ = x[CMPXCHG8-50] - _ = x[CPBOOST-51] - _ = x[CPPC-52] - _ = x[CX16-53] - _ = x[EFER_LMSLE_UNS-54] - _ = x[ENQCMD-55] - _ = x[ERMS-56] - _ = x[F16C-57] - _ = x[FLUSH_L1D-58] - _ = x[FMA3-59] - _ = x[FMA4-60] - _ = x[FP128-61] - _ = x[FP256-62] - _ = x[FSRM-63] - _ = x[FXSR-64] - _ = x[FXSROPT-65] - _ = x[GFNI-66] - _ = x[HLE-67] - _ = x[HRESET-68] - _ = x[HTT-69] - _ = x[HWA-70] - _ = x[HYBRID_CPU-71] - _ = x[HYPERVISOR-72] - _ = x[IA32_ARCH_CAP-73] - _ = x[IA32_CORE_CAP-74] - _ = x[IBPB-75] - _ = x[IBPB_BRTYPE-76] - _ = x[IBRS-77] - _ = x[IBRS_PREFERRED-78] - _ = x[IBRS_PROVIDES_SMP-79] - _ = x[IBS-80] - _ = x[IBSBRNTRGT-81] - _ = x[IBSFETCHSAM-82] - _ = x[IBSFFV-83] - _ = x[IBSOPCNT-84] - _ = x[IBSOPCNTEXT-85] - _ = x[IBSOPSAM-86] - _ = x[IBSRDWROPCNT-87] - _ = x[IBSRIPINVALIDCHK-88] - _ = x[IBS_FETCH_CTLX-89] - _ = x[IBS_OPDATA4-90] - _ = x[IBS_OPFUSE-91] - _ = x[IBS_PREVENTHOST-92] - _ = x[IBS_ZEN4-93] - _ = x[IDPRED_CTRL-94] - _ = x[INT_WBINVD-95] - _ = x[INVLPGB-96] - _ = x[KEYLOCKER-97] - _ = x[KEYLOCKERW-98] - _ = x[LAHF-99] - _ = x[LAM-100] - _ = x[LBRVIRT-101] - _ = x[LZCNT-102] - _ = x[MCAOVERFLOW-103] - _ = x[MCDT_NO-104] - _ = x[MCOMMIT-105] - _ = x[MD_CLEAR-106] - _ = x[MMX-107] - _ = x[MMXEXT-108] - _ = x[MOVBE-109] - _ = x[MOVDIR64B-110] - _ = x[MOVDIRI-111] - _ = x[MOVSB_ZL-112] - _ = x[MOVU-113] - _ = x[MPX-114] - _ = x[MSRIRC-115] - _ = x[MSRLIST-116] - _ = x[MSR_PAGEFLUSH-117] - _ = x[NRIPS-118] - _ = x[NX-119] - _ = x[OSXSAVE-120] - _ = x[PCONFIG-121] - _ = x[POPCNT-122] - _ = x[PPIN-123] - _ = x[PREFETCHI-124] - _ = x[PSFD-125] - _ = x[RDPRU-126] - _ = x[RDRAND-127] - _ = x[RDSEED-128] - _ = x[RDTSCP-129] - _ = x[RRSBA_CTRL-130] - _ = x[RTM-131] - _ = x[RTM_ALWAYS_ABORT-132] - _ = x[SBPB-133] - _ = x[SERIALIZE-134] - _ = x[SEV-135] - _ = x[SEV_64BIT-136] - _ = x[SEV_ALTERNATIVE-137] - _ = x[SEV_DEBUGSWAP-138] - _ = x[SEV_ES-139] - _ = x[SEV_RESTRICTED-140] - _ = x[SEV_SNP-141] - _ = x[SGX-142] - _ = x[SGXLC-143] - _ = x[SHA-144] - _ = x[SME-145] - _ = x[SME_COHERENT-146] - _ = x[SPEC_CTRL_SSBD-147] - _ = x[SRBDS_CTRL-148] - _ = x[SRSO_MSR_FIX-149] - _ = x[SRSO_NO-150] - _ = x[SRSO_USER_KERNEL_NO-151] - _ = x[SSE-152] - _ = x[SSE2-153] - _ = x[SSE3-154] - _ = x[SSE4-155] - _ = x[SSE42-156] - _ = x[SSE4A-157] - _ = x[SSSE3-158] - _ = x[STIBP-159] - _ = x[STIBP_ALWAYSON-160] - _ = x[STOSB_SHORT-161] - _ = x[SUCCOR-162] - _ = x[SVM-163] - _ = x[SVMDA-164] - _ = x[SVMFBASID-165] - _ = x[SVML-166] - _ = x[SVMNP-167] - _ = x[SVMPF-168] - _ = x[SVMPFT-169] - _ = x[SYSCALL-170] - _ = x[SYSEE-171] - _ = x[TBM-172] - _ = x[TDX_GUEST-173] - _ = x[TLB_FLUSH_NESTED-174] - _ = x[TME-175] - _ = x[TOPEXT-176] - _ = x[TSCRATEMSR-177] - _ = x[TSXLDTRK-178] - _ = x[VAES-179] - _ = x[VMCBCLEAN-180] - _ = x[VMPL-181] - _ = x[VMSA_REGPROT-182] - _ = x[VMX-183] - _ = x[VPCLMULQDQ-184] - _ = x[VTE-185] - _ = x[WAITPKG-186] - _ = x[WBNOINVD-187] - _ = x[WRMSRNS-188] - _ = x[X87-189] - _ = x[XGETBV1-190] - _ = x[XOP-191] - _ = x[XSAVE-192] - _ = x[XSAVEC-193] - _ = x[XSAVEOPT-194] - _ = x[XSAVES-195] - _ = x[AESARM-196] - _ = x[ARMCPUID-197] - _ = x[ASIMD-198] - _ = x[ASIMDDP-199] - _ = x[ASIMDHP-200] - _ = x[ASIMDRDM-201] - _ = x[ATOMICS-202] - _ = x[CRC32-203] - _ = x[DCPOP-204] - _ = x[EVTSTRM-205] - _ = x[FCMA-206] - _ = x[FP-207] - _ = x[FPHP-208] - _ = x[GPA-209] - _ = 
x[JSCVT-210] - _ = x[LRCPC-211] - _ = x[PMULL-212] - _ = x[SHA1-213] - _ = x[SHA2-214] - _ = x[SHA3-215] - _ = x[SHA512-216] - _ = x[SM3-217] - _ = x[SM4-218] - _ = x[SVE-219] - _ = x[lastID-220] + _ = x[AMXTF32-10] + _ = x[AMXCOMPLEX-11] + _ = x[APX_F-12] + _ = x[AVX-13] + _ = x[AVX10-14] + _ = x[AVX10_128-15] + _ = x[AVX10_256-16] + _ = x[AVX10_512-17] + _ = x[AVX2-18] + _ = x[AVX512BF16-19] + _ = x[AVX512BITALG-20] + _ = x[AVX512BW-21] + _ = x[AVX512CD-22] + _ = x[AVX512DQ-23] + _ = x[AVX512ER-24] + _ = x[AVX512F-25] + _ = x[AVX512FP16-26] + _ = x[AVX512IFMA-27] + _ = x[AVX512PF-28] + _ = x[AVX512VBMI-29] + _ = x[AVX512VBMI2-30] + _ = x[AVX512VL-31] + _ = x[AVX512VNNI-32] + _ = x[AVX512VP2INTERSECT-33] + _ = x[AVX512VPOPCNTDQ-34] + _ = x[AVXIFMA-35] + _ = x[AVXNECONVERT-36] + _ = x[AVXSLOW-37] + _ = x[AVXVNNI-38] + _ = x[AVXVNNIINT8-39] + _ = x[AVXVNNIINT16-40] + _ = x[BHI_CTRL-41] + _ = x[BMI1-42] + _ = x[BMI2-43] + _ = x[CETIBT-44] + _ = x[CETSS-45] + _ = x[CLDEMOTE-46] + _ = x[CLMUL-47] + _ = x[CLZERO-48] + _ = x[CMOV-49] + _ = x[CMPCCXADD-50] + _ = x[CMPSB_SCADBS_SHORT-51] + _ = x[CMPXCHG8-52] + _ = x[CPBOOST-53] + _ = x[CPPC-54] + _ = x[CX16-55] + _ = x[EFER_LMSLE_UNS-56] + _ = x[ENQCMD-57] + _ = x[ERMS-58] + _ = x[F16C-59] + _ = x[FLUSH_L1D-60] + _ = x[FMA3-61] + _ = x[FMA4-62] + _ = x[FP128-63] + _ = x[FP256-64] + _ = x[FSRM-65] + _ = x[FXSR-66] + _ = x[FXSROPT-67] + _ = x[GFNI-68] + _ = x[HLE-69] + _ = x[HRESET-70] + _ = x[HTT-71] + _ = x[HWA-72] + _ = x[HYBRID_CPU-73] + _ = x[HYPERVISOR-74] + _ = x[IA32_ARCH_CAP-75] + _ = x[IA32_CORE_CAP-76] + _ = x[IBPB-77] + _ = x[IBPB_BRTYPE-78] + _ = x[IBRS-79] + _ = x[IBRS_PREFERRED-80] + _ = x[IBRS_PROVIDES_SMP-81] + _ = x[IBS-82] + _ = x[IBSBRNTRGT-83] + _ = x[IBSFETCHSAM-84] + _ = x[IBSFFV-85] + _ = x[IBSOPCNT-86] + _ = x[IBSOPCNTEXT-87] + _ = x[IBSOPSAM-88] + _ = x[IBSRDWROPCNT-89] + _ = x[IBSRIPINVALIDCHK-90] + _ = x[IBS_FETCH_CTLX-91] + _ = x[IBS_OPDATA4-92] + _ = x[IBS_OPFUSE-93] + _ = x[IBS_PREVENTHOST-94] + _ = x[IBS_ZEN4-95] + _ = x[IDPRED_CTRL-96] + _ = x[INT_WBINVD-97] + _ = x[INVLPGB-98] + _ = x[KEYLOCKER-99] + _ = x[KEYLOCKERW-100] + _ = x[LAHF-101] + _ = x[LAM-102] + _ = x[LBRVIRT-103] + _ = x[LZCNT-104] + _ = x[MCAOVERFLOW-105] + _ = x[MCDT_NO-106] + _ = x[MCOMMIT-107] + _ = x[MD_CLEAR-108] + _ = x[MMX-109] + _ = x[MMXEXT-110] + _ = x[MOVBE-111] + _ = x[MOVDIR64B-112] + _ = x[MOVDIRI-113] + _ = x[MOVSB_ZL-114] + _ = x[MOVU-115] + _ = x[MPX-116] + _ = x[MSRIRC-117] + _ = x[MSRLIST-118] + _ = x[MSR_PAGEFLUSH-119] + _ = x[NRIPS-120] + _ = x[NX-121] + _ = x[OSXSAVE-122] + _ = x[PCONFIG-123] + _ = x[POPCNT-124] + _ = x[PPIN-125] + _ = x[PREFETCHI-126] + _ = x[PSFD-127] + _ = x[RDPRU-128] + _ = x[RDRAND-129] + _ = x[RDSEED-130] + _ = x[RDTSCP-131] + _ = x[RRSBA_CTRL-132] + _ = x[RTM-133] + _ = x[RTM_ALWAYS_ABORT-134] + _ = x[SBPB-135] + _ = x[SERIALIZE-136] + _ = x[SEV-137] + _ = x[SEV_64BIT-138] + _ = x[SEV_ALTERNATIVE-139] + _ = x[SEV_DEBUGSWAP-140] + _ = x[SEV_ES-141] + _ = x[SEV_RESTRICTED-142] + _ = x[SEV_SNP-143] + _ = x[SGX-144] + _ = x[SGXLC-145] + _ = x[SHA-146] + _ = x[SME-147] + _ = x[SME_COHERENT-148] + _ = x[SPEC_CTRL_SSBD-149] + _ = x[SRBDS_CTRL-150] + _ = x[SRSO_MSR_FIX-151] + _ = x[SRSO_NO-152] + _ = x[SRSO_USER_KERNEL_NO-153] + _ = x[SSE-154] + _ = x[SSE2-155] + _ = x[SSE3-156] + _ = x[SSE4-157] + _ = x[SSE42-158] + _ = x[SSE4A-159] + _ = x[SSSE3-160] + _ = x[STIBP-161] + _ = x[STIBP_ALWAYSON-162] + _ = x[STOSB_SHORT-163] + _ = x[SUCCOR-164] + _ = x[SVM-165] + _ = x[SVMDA-166] + _ = x[SVMFBASID-167] + _ = 
x[SVML-168] + _ = x[SVMNP-169] + _ = x[SVMPF-170] + _ = x[SVMPFT-171] + _ = x[SYSCALL-172] + _ = x[SYSEE-173] + _ = x[TBM-174] + _ = x[TDX_GUEST-175] + _ = x[TLB_FLUSH_NESTED-176] + _ = x[TME-177] + _ = x[TOPEXT-178] + _ = x[TSCRATEMSR-179] + _ = x[TSXLDTRK-180] + _ = x[VAES-181] + _ = x[VMCBCLEAN-182] + _ = x[VMPL-183] + _ = x[VMSA_REGPROT-184] + _ = x[VMX-185] + _ = x[VPCLMULQDQ-186] + _ = x[VTE-187] + _ = x[WAITPKG-188] + _ = x[WBNOINVD-189] + _ = x[WRMSRNS-190] + _ = x[X87-191] + _ = x[XGETBV1-192] + _ = x[XOP-193] + _ = x[XSAVE-194] + _ = x[XSAVEC-195] + _ = x[XSAVEOPT-196] + _ = x[XSAVES-197] + _ = x[AESARM-198] + _ = x[ARMCPUID-199] + _ = x[ASIMD-200] + _ = x[ASIMDDP-201] + _ = x[ASIMDHP-202] + _ = x[ASIMDRDM-203] + _ = x[ATOMICS-204] + _ = x[CRC32-205] + _ = x[DCPOP-206] + _ = x[EVTSTRM-207] + _ = x[FCMA-208] + _ = x[FHM-209] + _ = x[FP-210] + _ = x[FPHP-211] + _ = x[GPA-212] + _ = x[JSCVT-213] + _ = x[LRCPC-214] + _ = x[PMULL-215] + _ = x[RNDR-216] + _ = x[TLB-217] + _ = x[TS-218] + _ = x[SHA1-219] + _ = x[SHA2-220] + _ = x[SHA3-221] + _ = x[SHA512-222] + _ = x[SM3-223] + _ = x[SM4-224] + _ = x[SVE-225] + _ = x[lastID-226] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 90, 93, 98, 107, 116, 125, 129, 139, 151, 159, 167, 175, 183, 190, 200, 210, 218, 228, 239, 247, 257, 275, 290, 297, 309, 316, 323, 334, 346, 354, 358, 362, 368, 373, 381, 386, 392, 396, 405, 423, 431, 438, 442, 446, 460, 466, 470, 474, 483, 487, 491, 496, 501, 505, 509, 516, 520, 523, 529, 532, 535, 545, 555, 568, 581, 585, 596, 600, 614, 631, 634, 644, 655, 661, 669, 680, 688, 700, 716, 730, 741, 751, 766, 774, 785, 795, 802, 811, 821, 825, 828, 835, 840, 851, 858, 865, 873, 876, 882, 887, 896, 903, 911, 915, 918, 924, 931, 944, 949, 951, 958, 965, 971, 975, 984, 988, 993, 999, 1005, 1011, 1021, 1024, 1040, 1044, 
1053, 1056, 1065, 1080, 1093, 1099, 1113, 1120, 1123, 1128, 1131, 1134, 1146, 1160, 1170, 1182, 1189, 1208, 1211, 1215, 1219, 1223, 1228, 1233, 1238, 1243, 1257, 1268, 1274, 1277, 1282, 1291, 1295, 1300, 1305, 1311, 1318, 1323, 1326, 1335, 1351, 1354, 1360, 1370, 1378, 1382, 1391, 1395, 1407, 1410, 1420, 1423, 1430, 1438, 1445, 1448, 1455, 1458, 1463, 1469, 1477, 1483, 1489, 1497, 1502, 1509, 1516, 1524, 1531, 1536, 1541, 1548, 1552, 1555, 1557, 1561, 1564, 1569, 1574, 1579, 1583, 1586, 1588, 1592, 1596, 1600, 1606, 1609, 1612, 1615, 1621} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go index 84b1acd21..6f0b33ca6 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -96,9 +96,11 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP) // setFeature(c, "", EVTSTRM) setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA) + setFeature(c, "hw.optional.arm.FEAT_FHM", FHM) setFeature(c, "hw.optional.arm.FEAT_FP", FP) setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP) setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA) + setFeature(c, "hw.optional.arm.FEAT_RNG", RNDR) setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT) setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC) setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL) @@ -106,6 +108,10 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2) setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3) setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512) + setFeature(c, "hw.optional.arm.FEAT_TLBIOS", TLB) + setFeature(c, "hw.optional.arm.FEAT_TLBIRANGE", TLB) + setFeature(c, "hw.optional.arm.FEAT_FlagM", TS) + setFeature(c, "hw.optional.arm.FEAT_FlagM2", TS) // setFeature(c, "", SM3) // setFeature(c, "", SM4) setFeature(c, "hw.optional.arm.FEAT_SVE", SVE) diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go index ee278b9e4..d96d24438 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -39,6 +39,80 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + hwcap_USCAT = 1 << 25 + hwcap_ILRCPC = 1 << 26 + hwcap_FLAGM = 1 << 27 + hwcap_SSBS = 1 << 28 + hwcap_SB = 1 << 29 + hwcap_PACA = 1 << 30 + hwcap_PACG = 1 << 31 + hwcap_GCS = 1 << 32 + + hwcap2_DCPODP = 1 << 0 + hwcap2_SVE2 = 1 << 1 + hwcap2_SVEAES = 1 << 2 + hwcap2_SVEPMULL = 1 << 3 + hwcap2_SVEBITPERM = 1 << 4 + hwcap2_SVESHA3 = 1 << 5 + hwcap2_SVESM4 = 1 << 6 + hwcap2_FLAGM2 = 1 << 7 + hwcap2_FRINT = 1 << 8 + hwcap2_SVEI8MM = 1 << 9 + hwcap2_SVEF32MM = 1 << 10 + hwcap2_SVEF64MM = 1 << 11 + hwcap2_SVEBF16 = 1 << 12 + hwcap2_I8MM = 1 << 13 + hwcap2_BF16 = 1 << 14 + hwcap2_DGH = 1 << 15 + hwcap2_RNG = 1 << 16 + hwcap2_BTI = 1 << 17 + hwcap2_MTE = 1 << 18 + hwcap2_ECV = 1 << 19 + hwcap2_AFP = 1 << 20 + hwcap2_RPRES = 1 << 21 + hwcap2_MTE3 = 1 << 22 + hwcap2_SME = 1 << 23 + hwcap2_SME_I16I64 = 1 << 24 + hwcap2_SME_F64F64 = 1 << 25 + hwcap2_SME_I8I32 = 1 << 26 + hwcap2_SME_F16F32 = 1 << 27 + hwcap2_SME_B16F32 = 1 << 28 + hwcap2_SME_F32F32 = 1 << 29 + hwcap2_SME_FA64 = 1 << 30 + hwcap2_WFXT = 1 << 31 + hwcap2_EBF16 = 1 << 32 + hwcap2_SVE_EBF16 = 1 << 33 + hwcap2_CSSC = 1 << 34 + hwcap2_RPRFM = 1 << 35 + 
hwcap2_SVE2P1 = 1 << 36 + hwcap2_SME2 = 1 << 37 + hwcap2_SME2P1 = 1 << 38 + hwcap2_SME_I16I32 = 1 << 39 + hwcap2_SME_BI32I32 = 1 << 40 + hwcap2_SME_B16B16 = 1 << 41 + hwcap2_SME_F16F16 = 1 << 42 + hwcap2_MOPS = 1 << 43 + hwcap2_HBC = 1 << 44 + hwcap2_SVE_B16B16 = 1 << 45 + hwcap2_LRCPC3 = 1 << 46 + hwcap2_LSE128 = 1 << 47 + hwcap2_FPMR = 1 << 48 + hwcap2_LUT = 1 << 49 + hwcap2_FAMINMAX = 1 << 50 + hwcap2_F8CVT = 1 << 51 + hwcap2_F8FMA = 1 << 52 + hwcap2_F8DP4 = 1 << 53 + hwcap2_F8DP2 = 1 << 54 + hwcap2_F8E4M3 = 1 << 55 + hwcap2_F8E5M2 = 1 << 56 + hwcap2_SME_LUTV2 = 1 << 57 + hwcap2_SME_F8F16 = 1 << 58 + hwcap2_SME_F8F32 = 1 << 59 + hwcap2_SME_SF8FMA = 1 << 60 + hwcap2_SME_SF8DP4 = 1 << 61 + hwcap2_SME_SF8DP2 = 1 << 62 + hwcap2_POE = 1 << 63 ) func detectOS(c *CPUInfo) bool { @@ -104,11 +178,15 @@ func detectOS(c *CPUInfo) bool { c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM) c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TLB) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TS) c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore index 6f868895b..00710d507 100644 --- a/vendor/github.com/redis/go-redis/v9/.gitignore +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -3,4 +3,13 @@ testdata/* .idea/ .DS_Store *.tar.gz -*.dic \ No newline at end of file +*.dic +redis8tests.sh +coverage.txt +**/coverage.txt +.vscode +tmp/* +*.test + +# maintenanceNotifications upgrade documentation (temporary) +maintenanceNotifications/docs/ diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml index 285aca6b3..872454ff7 100644 --- a/vendor/github.com/redis/go-redis/v9/.golangci.yml +++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml @@ -1,3 +1,34 @@ +version: "2" run: timeout: 5m tests: false +linters: + settings: + staticcheck: + checks: + - all + # Incorrect or missing package comment. + # https://staticcheck.dev/docs/checks/#ST1000 + - -ST1000 + # Omit embedded fields from selector expression. + # https://staticcheck.dev/docs/checks/#QF1008 + - -QF1008 + - -ST1003 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md deleted file mode 100644 index e1652b179..000000000 --- a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md +++ /dev/null @@ -1,133 +0,0 @@ -## Unreleased - -### Changed - -* `go-redis` won't skip span creation if the parent spans is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980)) - Users can use the OpenTelemetry sampler to control the sampling behavior. 
- For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep - a similar behavior (drop orphan spans) of `go-redis` as before. - -## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) - - -### Features - -* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) -* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) -* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) - - - -## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) - - -### Bug Fixes - -* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) - - -### Features - -* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) -* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) -* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) - - - -## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) - -### New Features - -- feat(scan): scan time.Time sets the default decoding (#2413) -- Add support for CLUSTER LINKS command (#2504) -- Add support for acl dryrun command (#2502) -- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) -- Add support for LCS Command (#2480) -- Add support for BZMPOP (#2456) -- Adding support for ZMPOP command (#2408) -- Add support for LMPOP (#2440) -- feat: remove pool unused fields (#2438) -- Expiretime and PExpireTime (#2426) -- Implement `FUNCTION` group of commands (#2475) -- feat(zadd): add ZAddLT and ZAddGT (#2429) -- Add: Support for COMMAND LIST command (#2491) -- Add support for BLMPOP (#2442) -- feat: check pipeline.Do to prevent confusion with Exec (#2517) -- Function stats, function kill, fcall and fcall_ro (#2486) -- feat: Add support for CLUSTER SHARDS command (#2507) -- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) - -### Fixed - -- fix: eval api cmd.SetFirstKeyPos (#2501) -- fix: limit the number of connections created (#2441) -- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) -- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) -- fix: group lag can be null (#2448) - -### Maintenance - -- Updating to the latest version of redis (#2508) -- Allowing for running tests on a port other than the fixed 6380 (#2466) -- redis 7.0.8 in tests (#2450) -- docs: Update redisotel example for v9 (#2425) -- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) -- chore: add Chinese translation (#2436) -- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) -- chore(deps): bump 
github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420) -- chore(deps): bump actions/setup-go from 3 to 4 (#2495) -- docs: add instructions for the HSet api (#2503) -- docs: add reading lag field comment (#2451) -- test: update go mod before testing(go mod tidy) (#2423) -- docs: fix comment typo (#2505) -- test: remove testify (#2463) -- refactor: change ListElementCmd to KeyValuesCmd. (#2443) -- fix(appendArg): appendArg case special type (#2489) - -## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01) - -### Features - -* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65)) - -## v9 2023-01-30 - -### Breaking - -- Changed Pipelines to not be thread-safe any more. - -### Added - -- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was - contributed by @monkey92t who has done the majority of work in this release. -- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts - and deadlines. See - [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details. -- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example, - `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`. -- Added metrics instrumentation using `redisotel.IstrumentMetrics`. See - [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html) -- Added `redis.HasErrorPrefix` to help working with errors. - -### Changed - -- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is - completely gone in v9. -- Reworked hook interface and added `DialHook`. -- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See - [example](example/otel) and - [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html). -- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making - an allocation. -- Renamed the option `MaxConnAge` to `ConnMaxLifetime`. -- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`. -- Removed connection reaper in favor of `MaxIdleConns`. -- Removed `WithContext` since `context.Context` can be passed directly as an arg. -- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and - it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to - reset commands for some reason. - -### Fixed - -- Improved and fixed pipeline retries. -- As usually, added support for more commands and fixed some bugs. diff --git a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md index 90030b89f..8c68c522e 100644 --- a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md +++ b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md @@ -32,20 +32,33 @@ Here's how to get started with your code contribution: 1. Create your own fork of go-redis 2. Do the changes in your fork -3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running: - ```docker run -p 6379:6379 -it redis/redis-stack-server:edge``` -4. 
While developing, make sure the tests pass by running `make tests`
+3. If you need a development environment, run `make docker.start`.
+
+> Note: this clones and builds the docker containers specified in `docker-compose.yml`; to learn more about
+> the infrastructure that will be started, check the `docker-compose.yml`. You also have the possibility
+> to specify the redis image that will be pulled with the env variable `CLIENT_LIBS_TEST_IMAGE`.
+> By default the docker image that will be pulled and started is `redislabs/client-libs-test:8.2.1-pre`.
+> If you want to test with a newer Redis version, using a newer version of `redislabs/client-libs-test` should work out of the box.
+
+4. While developing, make sure the tests pass by running `make test` (if you have the docker containers running, `make test.ci` may be sufficient).
+> Note: `make test` will try to start all containers, run the tests with `make test.ci` and then stop all containers.
 5. If you like the change and think the project could use it, send a pull request

 To see what else is part of the automation, run `invoke -l`
+
 ## Testing

-Call `make test` to run all tests, including linters.
+### Setting up Docker
+To run the tests, you need to have Docker installed and running. If you are using a host OS that does not support
+docker host networks out of the box (e.g. Windows, OSX), you need to set up Docker Desktop and enable docker host networks.
+
+### Running tests
+Call `make test` to run all tests.

 Continuous Integration uses these same wrappers to run all of these
-tests against multiple versions of python. Feel free to test your
+tests against multiple versions of Redis. Feel free to test your
 changes against all the go versions supported, as declared by the
 [build.yml](./.github/workflows/build.yml) file.
@@ -99,3 +112,7 @@ The core team regularly looks at pull requests. We will provide
 feedback as soon as possible. After receiving our feedback, please
 respond within two weeks. After that time, we may close your
 PR if it isn't showing any activity.
+
+## Support
+
+Maintainers can provide limited support to contributors on Discord: https://discord.gg/W4txy5AeKM
diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile
index d8d007596..c2264a4e3 100644
--- a/vendor/github.com/redis/go-redis/v9/Makefile
+++ b/vendor/github.com/redis/go-redis/v9/Makefile
@@ -1,41 +1,79 @@
 GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+REDIS_VERSION ?= 8.4
+RE_CLUSTER ?= false
+RCE_DOCKER ?= true
+CLIENT_LIBS_TEST_IMAGE ?= redislabs/client-libs-test:8.4.0

-test: testdeps
-	$(eval GO_VERSION := $(shell go version | cut -d " " -f 3 | cut -d. -f2))
+docker.start:
+	export RE_CLUSTER=$(RE_CLUSTER) && \
+	export RCE_DOCKER=$(RCE_DOCKER) && \
+	export REDIS_VERSION=$(REDIS_VERSION) && \
+	export CLIENT_LIBS_TEST_IMAGE=$(CLIENT_LIBS_TEST_IMAGE) && \
+	docker compose --profile all up -d --quiet-pull
+
+docker.stop:
+	docker compose --profile all down
+
+test:
+	$(MAKE) docker.start
+	@if [ -z "$(REDIS_VERSION)" ]; then \
+		echo "REDIS_VERSION not set, running all tests"; \
+		$(MAKE) test.ci; \
+	else \
+		MAJOR_VERSION=$$(echo "$(REDIS_VERSION)" | cut -d. 
-f1); \
+		if [ "$$MAJOR_VERSION" -ge 8 ]; then \
+			echo "REDIS_VERSION $(REDIS_VERSION) >= 8, running all tests"; \
+			$(MAKE) test.ci; \
+		else \
+			echo "REDIS_VERSION $(REDIS_VERSION) < 8, skipping vector_sets tests"; \
+			$(MAKE) test.ci.skip-vectorsets; \
+		fi; \
+	fi
+	$(MAKE) docker.stop
+
+test.ci:
 	set -e; for dir in $(GO_MOD_DIRS); do \
-	  if echo "$${dir}" | grep -q "./example" && [ "$(GO_VERSION)" = "19" ]; then \
-	    echo "Skipping go test in $${dir} due to Go version 1.19 and dir contains ./example"; \
-	    continue; \
-	  fi; \
 	  echo "go test in $${dir}"; \
 	  (cd "$${dir}" && \
+	    export RE_CLUSTER=$(RE_CLUSTER) && \
+	    export RCE_DOCKER=$(RCE_DOCKER) && \
+	    export REDIS_VERSION=$(REDIS_VERSION) && \
 	    go mod tidy -compat=1.18 && \
-	    go test && \
-	    go test ./... -short -race && \
-	    go test ./... -run=NONE -bench=. -benchmem && \
-	    env GOOS=linux GOARCH=386 go test && \
-	    go vet); \
+	    go vet && \
+	    go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race -skip Example); \
 	done
 	cd internal/customvet && go build .
 	go vet -vettool ./internal/customvet/customvet

-testdeps: testdata/redis/src/redis-server
+test.ci.skip-vectorsets:
+	set -e; for dir in $(GO_MOD_DIRS); do \
+	  echo "go test in $${dir} (skipping vector sets)"; \
+	  (cd "$${dir}" && \
+	    export RE_CLUSTER=$(RE_CLUSTER) && \
+	    export RCE_DOCKER=$(RCE_DOCKER) && \
+	    export REDIS_VERSION=$(REDIS_VERSION) && \
+	    go mod tidy -compat=1.18 && \
+	    go vet && \
+	    go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race \
+	      -skip 'Example|VectorSet|vectorset'); \
+	done
+	cd internal/customvet && go build .
+	go vet -vettool ./internal/customvet/customvet

-bench: testdeps
-	go test ./... -test.run=NONE -test.bench=. -test.benchmem
+bench:
+	export RE_CLUSTER=$(RE_CLUSTER) && \
+	export RCE_DOCKER=$(RCE_DOCKER) && \
+	export REDIS_VERSION=$(REDIS_VERSION) && \
+	go test ./... -test.run=NONE -test.bench=. -test.benchmem -skip Example

-.PHONY: all test testdeps bench fmt
+.PHONY: all test test.ci test.ci.skip-vectorsets bench fmt

 build:
+	export RE_CLUSTER=$(RE_CLUSTER) && \
+	export RCE_DOCKER=$(RCE_DOCKER) && \
+	export REDIS_VERSION=$(REDIS_VERSION) && \
 	go build . 
-testdata/redis:
-	mkdir -p $@
-	wget -qO- https://download.redis.io/releases/redis-7.4-rc2.tar.gz | tar xvz --strip-components=1 -C $@
-
-testdata/redis/src/redis-server: testdata/redis
-	cd $< && make all
-
 fmt:
 	gofumpt -w ./
 	goimports -w -local github.com/redis/go-redis ./
diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md
index d2a8cd78a..38bd17b58 100644
--- a/vendor/github.com/redis/go-redis/v9/README.md
+++ b/vendor/github.com/redis/go-redis/v9/README.md
@@ -2,16 +2,32 @@
 [![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions)
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
-[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.io/docs/latest/develop/clients/go/)
+[![Go Report Card](https://goreportcard.com/badge/github.com/redis/go-redis/v9)](https://goreportcard.com/report/github.com/redis/go-redis/v9)
+[![codecov](https://codecov.io/github/redis/go-redis/graph/badge.svg?token=tsrCZKuSSw)](https://codecov.io/github/redis/go-redis)

-> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
-> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
-> use it to monitor applications and set up automatic alerts to receive notifications via email,
-> Slack, Telegram, and others.
->
-> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
-> demonstrates how you can use Uptrace to monitor go-redis.
+[![Discord](https://img.shields.io/discord/697882427875393627.svg?style=social&logo=discord)](https://discord.gg/W4txy5AeKM)
+[![Twitch](https://img.shields.io/twitch/status/redisinc?style=social)](https://www.twitch.tv/redisinc)
+[![YouTube](https://img.shields.io/youtube/channel/views/UCD78lHSwYqMlyetR0_P4Vig?style=social)](https://www.youtube.com/redisinc)
+[![Twitter](https://img.shields.io/twitter/follow/redisinc?style=social)](https://twitter.com/redisinc)
+[![Stack Exchange questions](https://img.shields.io/stackexchange/stackoverflow/t/go-redis?style=social&logo=stackoverflow&label=Stackoverflow)](https://stackoverflow.com/questions/tagged/go-redis)
+
+> go-redis is the official Redis client library for the Go programming language. It offers a straightforward interface for interacting with Redis servers.
+
+## Supported versions
+
+In `go-redis` we are aiming to support the last three releases of Redis. Currently, this means we support:
+- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0
+- [Redis 8.2](https://raw.githubusercontent.com/redis/redis/8.2/00-RELEASENOTES) - using Redis CE 8.2
+- [Redis 8.4](https://raw.githubusercontent.com/redis/redis/8.4/00-RELEASENOTES) - using Redis CE 8.4
+
+Although the `go.mod` states it requires at minimum `go 1.18`, our CI is configured to run the tests against all three
+versions of Redis and the latest two versions of Go ([1.23](https://go.dev/doc/devel/release#go1.23.0),
+[1.24](https://go.dev/doc/devel/release#go1.24.0)). 
We observe that some module-related tests may not pass with
+Redis Stack 7.2, and some commands have changed in Redis CE 8.0.
+Although it is not officially supported, `go-redis/v9` should be able to work with any Redis 7.0+.
+Please refer to the documentation and the tests if you experience any issues. We plan to update the Go version
+in the `go.mod` to `go 1.24` in one of the next releases.

 ## How do I Redis?

@@ -27,40 +43,39 @@

 [Work at Redis](https://redis.com/company/careers/jobs/)

-## Documentation
-
-- [English](https://redis.uptrace.dev)
-- [简体中文](https://redis.uptrace.dev/zh/)

 ## Resources

 - [Discussions](https://github.com/redis/go-redis/discussions)
-- [Chat](https://discord.gg/rWtp5Aj)
+- [Chat](https://discord.gg/W4txy5AeKM)
 - [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
 - [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)

+## Old documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)
+
 ## Ecosystem

-- [Redis Mock](https://github.com/go-redis/redismock)
+- [Entra ID (Azure AD)](https://github.com/redis/go-redis-entraid)
 - [Distributed Locks](https://github.com/bsm/redislock)
 - [Redis Cache](https://github.com/go-redis/cache)
 - [Rate limiting](https://github.com/go-redis/redis_rate)

-This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
-key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol.
-
 ## Features

 - Redis commands except QUIT and SYNC.
 - Automatic connection pooling.
+- [StreamingCredentialsProvider (e.g. entra id, oauth)](#1-streaming-credentials-provider-highest-priority) (experimental)
 - [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
 - [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
 - [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
 - [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
 - [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
-- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
 - [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
 - [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+- [Customizable read and write buffer sizes.](#custom-buffer-sizes)

 ## Installation

@@ -121,17 +136,121 @@ func ExampleClient() {
 }
 ```

-The above can be modified to specify the version of the RESP protocol by adding the `protocol`
-option to the `Options` struct:
+### Authentication
+
+The Redis client supports multiple ways to provide authentication credentials, with a clear priority order. Here are the available options:
+
+#### 1. Streaming Credentials Provider (Highest Priority) - Experimental feature
+
+The streaming credentials provider allows for dynamic credential updates during the connection lifetime. This is particularly useful for managed identity services and token-based authentication. 
```go - rdb := redis.NewClient(&redis.Options{ - Addr: "localhost:6379", - Password: "", // no password set - DB: 0, // use default DB - Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 - }) +type StreamingCredentialsProvider interface { + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} + +type CredentialsListener interface { + OnNext(credentials Credentials) // Called when credentials are updated + OnError(err error) // Called when an error occurs +} + +type Credentials interface { + BasicAuth() (username string, password string) + RawCredentials() string +} +``` +Example usage: +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + StreamingCredentialsProvider: &MyCredentialsProvider{}, +}) +``` + +**Note:** The streaming credentials provider can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) to enable Entra ID (formerly Azure AD) authentication. This allows for seamless integration with Azure's managed identity services and token-based authentication. + +Example with Entra ID: +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis-entraid" +) + +// Create an Entra ID credentials provider +provider := entraid.NewDefaultAzureIdentityProvider() + +// Configure Redis client with Entra ID authentication +rdb := redis.NewClient(&redis.Options{ + Addr: "your-redis-server.redis.cache.windows.net:6380", + StreamingCredentialsProvider: provider, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, +}) +``` + +#### 2. Context-based Credentials Provider + +The context-based provider allows credentials to be determined at the time of each operation, using the context. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + CredentialsProviderContext: func(ctx context.Context) (string, string, error) { + // Return username, password, and any error + return "user", "pass", nil + }, +}) +``` + +#### 3. Regular Credentials Provider + +A simple function-based provider that returns static credentials. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + CredentialsProvider: func() (string, string) { + // Return username and password + return "user", "pass" + }, +}) +``` + +#### 4. Username/Password Fields (Lowest Priority) + +The most basic way to provide credentials is through the `Username` and `Password` fields in the options. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Username: "user", + Password: "pass", +}) +``` + +#### Priority Order + +The client will use credentials in the following priority order: +1. Streaming Credentials Provider (if set) +2. Context-based Credentials Provider (if set) +3. Regular Credentials Provider (if set) +4. Username/Password fields (if set) + +If none of these are set, the client will attempt to connect without authentication. + +### Protocol Version + +The client supports both RESP2 and RESP3 protocols. You can specify the protocol version in the options: + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 +}) ``` ### Connecting via a redis url @@ -158,6 +277,36 @@ func ExampleClient() *redis.Client { ``` +### Instrument with OpenTelemetry + +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis/extra/redisotel/v9" + "errors" +) + +func main() { + ... 
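+	// redisotel attaches tracing and metrics hooks to the client created
+	// below; errors.Join (Go 1.20+) combines both setup errors into one check.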
+	rdb := redis.NewClient(&redis.Options{...})
+
+	if err := errors.Join(redisotel.InstrumentTracing(rdb), redisotel.InstrumentMetrics(rdb)); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+
+### Buffer Size Configuration
+
+go-redis uses 32KiB read and write buffers by default for optimal performance. For high-throughput applications or large pipelines, you can customize buffer sizes:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr:            "localhost:6379",
+    ReadBufferSize:  1024 * 1024, // 1MiB read buffer
+    WriteBufferSize: 1024 * 1024, // 1MiB write buffer
+})
+```

### Advanced Configuration

@@ -184,9 +333,63 @@ rdb := redis.NewClient(&redis.Options{
})
```

-## Contributing
+#### Unstable RESP3 Structures for RediSearch Commands
+When integrating Redis with applications using RESP3, it's important to note that some response structures aren't final yet. This is especially true for more complex structures like search and query results. We recommend using RESP2 when using the search and query capabilities, but we plan to stabilize the RESP3-based APIs in the coming versions. You can find more guidance in the upcoming release notes.
+
+To enable unstable RESP3, set the option in your client configuration:
+
+```go
+redis.NewClient(&redis.Options{
+    UnstableResp3: true,
+})
+```
+**Note:** When UnstableResp3 mode is enabled, you must use RawResult() and RawVal() to retrieve raw data.
+Since the raw response is the only option for unstable search commands, Val() and Result() calls have no effect on them:
+
+```go
+res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawResult()
+val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal()
+```
+
+#### Redis-Search Default Dialect
+
+In the Redis-Search module, **the default dialect is 2**. If needed, you can explicitly specify a different dialect using the appropriate configuration in your queries.
+
+**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by passing the desired dialect in the arguments of the command you want to execute.
+For example:
+```go
+res2, err := rdb.FTSearchWithArgs(ctx,
+    "idx:bicycle",
+    "@pickup_zone:[CONTAINS $bike]",
+    &redis.FTSearchOptions{
+        Params: map[string]interface{}{
+            "bike": "POINT(-0.1278 51.5074)",
+        },
+        DialectVersion: 3,
+    },
+).Result()
+```
+You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
+
+#### Custom buffer sizes
+Prior to v9.12, the buffer size was the default Go value of 4096 bytes. Starting from v9.12,
+go-redis uses 32KiB read and write buffers by default for optimal performance.
+For high-throughput applications or large pipelines, you can customize buffer sizes:
-Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library!
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr:            "localhost:6379",
+    ReadBufferSize:  1024 * 1024, // 1MiB read buffer
+    WriteBufferSize: 1024 * 1024, // 1MiB write buffer
+})
+```
+
+**Important**: If you experience any issues with the default buffer sizes, please try setting them to the Go default of 4096 bytes.
+
+## Contributing
+We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
+We appreciate your help in making go-redis better for everyone.
+If you are interested in contributing to the go-redis library, please check out our [contributing guidelines](CONTRIBUTING.md) for more information on how to get started. ## Look and feel @@ -223,38 +426,150 @@ vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello") res, err := rdb.Do(ctx, "set", "key", "value").Result() ``` -## Run the test - -go-redis will start a redis-server and run the test cases. +## Typed Errors -The paths of redis-server bin file and redis config file are defined in `main_test.go`: +go-redis provides typed error checking functions for common Redis errors: ```go -var ( - redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) - redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) -) +// Cluster and replication errors +redis.IsLoadingError(err) // Redis is loading the dataset +redis.IsReadOnlyError(err) // Write to read-only replica +redis.IsClusterDownError(err) // Cluster is down +redis.IsTryAgainError(err) // Command should be retried +redis.IsMasterDownError(err) // Master is down +redis.IsMovedError(err) // Returns (address, true) if key moved +redis.IsAskError(err) // Returns (address, true) if key being migrated + +// Connection and resource errors +redis.IsMaxClientsError(err) // Maximum clients reached +redis.IsAuthError(err) // Authentication failed (NOAUTH, WRONGPASS, unauthenticated) +redis.IsPermissionError(err) // Permission denied (NOPERM) +redis.IsOOMError(err) // Out of memory (OOM) + +// Transaction errors +redis.IsExecAbortError(err) // Transaction aborted (EXECABORT) ``` -For local testing, you can change the variables to refer to your local files, or create a soft link -to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: +### Error Wrapping in Hooks -```shell -ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src -cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ +When wrapping errors in hooks, use custom error types with `Unwrap()` method (preferred) or `fmt.Errorf` with `%w`. 
Always call `cmd.SetErr()` to preserve error type information: + +```go +// Custom error type (preferred) +type AppError struct { + Code string + RequestID string + Err error +} + +func (e *AppError) Error() string { + return fmt.Sprintf("[%s] request_id=%s: %v", e.Code, e.RequestID, e.Err) +} + +func (e *AppError) Unwrap() error { + return e.Err +} + +// Hook implementation +func (h MyHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + err := next(ctx, cmd) + if err != nil { + // Wrap with custom error type + wrappedErr := &AppError{ + Code: "REDIS_ERROR", + RequestID: getRequestID(ctx), + Err: err, + } + cmd.SetErr(wrappedErr) + return wrappedErr // Return wrapped error to preserve it + } + return nil + } +} + +// Typed error detection works through wrappers +if redis.IsLoadingError(err) { + // Retry logic +} + +// Extract custom error if needed +var appErr *AppError +if errors.As(err, &appErr) { + log.Printf("Request: %s", appErr.RequestID) +} ``` -Lastly, run: +Alternatively, use `fmt.Errorf` with `%w`: +```go +wrappedErr := fmt.Errorf("context: %w", err) +cmd.SetErr(wrappedErr) +``` -```shell -go test +### Pipeline Hook Example + +For pipeline operations, use `ProcessPipelineHook`: + +```go +type PipelineLoggingHook struct{} + +func (h PipelineLoggingHook) DialHook(next redis.DialHook) redis.DialHook { + return next +} + +func (h PipelineLoggingHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return next +} + +func (h PipelineLoggingHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + start := time.Now() + + // Execute the pipeline + err := next(ctx, cmds) + + duration := time.Since(start) + log.Printf("Pipeline executed %d commands in %v", len(cmds), duration) + + // Process individual command errors + // Note: Individual command errors are already set on each cmd by the pipeline execution + for _, cmd := range cmds { + if cmdErr := cmd.Err(); cmdErr != nil { + // Check for specific error types using typed error functions + if redis.IsAuthError(cmdErr) { + log.Printf("Auth error in pipeline command %s: %v", cmd.Name(), cmdErr) + } else if redis.IsPermissionError(cmdErr) { + log.Printf("Permission error in pipeline command %s: %v", cmd.Name(), cmdErr) + } + + // Optionally wrap individual command errors to add context + // The wrapped error preserves type information through errors.As() + wrappedErr := fmt.Errorf("pipeline cmd %s failed: %w", cmd.Name(), cmdErr) + cmd.SetErr(wrappedErr) + } + } + + // Return the pipeline-level error (connection errors, etc.) + // You can wrap it if needed, or return it as-is + return err + } +} + +// Register the hook +rdb.AddHook(PipelineLoggingHook{}) + +// Use pipeline - errors are still properly typed +pipe := rdb.Pipeline() +pipe.Set(ctx, "key1", "value1", 0) +pipe.Get(ctx, "key2") +_, err := pipe.Exec(ctx) ``` -Another option is to run your specific tests with an already running redis. The example below, tests -against a redis running on port 9999.: +## Run the test +Recommended to use Docker, just need to run: ```shell -REDIS_PORT=9999 go test +make test ``` ## See also @@ -266,6 +581,14 @@ REDIS_PORT=9999 go test ## Contributors +> The go-redis project was originally initiated by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. 
You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which +> demonstrates how you can use Uptrace to monitor go-redis. + Thanks to all the people who already contributed! diff --git a/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md new file mode 100644 index 000000000..e38ade442 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md @@ -0,0 +1,705 @@ +# Release Notes + +# 9.17.2 (2025-12-01) + +## 🐛 Bug Fixes + +- **Connection Pool**: Fixed critical race condition in turn management that could cause connection leaks when dial goroutines complete after request timeout ([#3626](https://github.com/redis/go-redis/pull/3626)) by [@cyningsun](https://github.com/cyningsun) +- **Context Timeout**: Improved context timeout calculation to use minimum of remaining time and DialTimeout, preventing goroutines from waiting longer than necessary ([#3626](https://github.com/redis/go-redis/pull/3626)) by [@cyningsun](https://github.com/cyningsun) + +## 🧰 Maintenance + +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.54.0 to 0.55.0 ([#3627](https://github.com/redis/go-redis/pull/3627)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@cyningsun](https://github.com/cyningsun) and [@ndyakov](https://github.com/ndyakov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.17.1...v9.17.2 + +# 9.17.1 (2025-11-25) + +## 🐛 Bug Fixes + +- add wait to keyless commands list ([#3615](https://github.com/redis/go-redis/pull/3615)) by [@marcoferrer](https://github.com/marcoferrer) +- fix(time): remove cached time optimization ([#3611](https://github.com/redis/go-redis/pull/3611)) by [@ndyakov](https://github.com/ndyakov) + +## 🧰 Maintenance + +- chore(deps): bump golangci/golangci-lint-action from 9.0.0 to 9.1.0 ([#3609](https://github.com/redis/go-redis/pull/3609)) +- chore(deps): bump actions/checkout from 5 to 6 ([#3610](https://github.com/redis/go-redis/pull/3610)) +- chore(script): fix help call in tag.sh ([#3606](https://github.com/redis/go-redis/pull/3606)) by [@ndyakov](https://github.com/ndyakov) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@marcoferrer](https://github.com/marcoferrer) and [@ndyakov](https://github.com/ndyakov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.17.0...v9.17.1 + +# 9.17.0 (2025-11-19) + +## 🚀 Highlights + +### Redis 8.4 Support +Added support for Redis 8.4, including new commands and features ([#3572](https://github.com/redis/go-redis/pull/3572)) + +### Typed Errors +Introduced typed errors for better error handling using `errors.As` instead of string checks. 
Errors can now be wrapped and set to commands in hooks without breaking library functionality ([#3602](https://github.com/redis/go-redis/pull/3602)) + +### New Commands +- **CAS/CAD Commands**: Added support for Compare-And-Set/Compare-And-Delete operations with conditional matching (`IFEQ`, `IFNE`, `IFDEQ`, `IFDNE`) ([#3583](https://github.com/redis/go-redis/pull/3583), [#3595](https://github.com/redis/go-redis/pull/3595)) +- **MSETEX**: Atomically set multiple key-value pairs with expiration options and conditional modes ([#3580](https://github.com/redis/go-redis/pull/3580)) +- **XReadGroup CLAIM**: Consume both incoming and idle pending entries from streams in a single call ([#3578](https://github.com/redis/go-redis/pull/3578)) +- **ACL Commands**: Added `ACLGenPass`, `ACLUsers`, and `ACLWhoAmI` ([#3576](https://github.com/redis/go-redis/pull/3576)) +- **SLOWLOG Commands**: Added `SLOWLOG LEN` and `SLOWLOG RESET` ([#3585](https://github.com/redis/go-redis/pull/3585)) +- **LATENCY Commands**: Added `LATENCY LATEST` and `LATENCY RESET` ([#3584](https://github.com/redis/go-redis/pull/3584)) + +### Search & Vector Improvements +- **Hybrid Search**: Added **EXPERIMENTAL** support for the new `FT.HYBRID` command ([#3573](https://github.com/redis/go-redis/pull/3573)) +- **Vector Range**: Added `VRANGE` command for vector sets ([#3543](https://github.com/redis/go-redis/pull/3543)) +- **FT.INFO Enhancements**: Added vector-specific attributes in FT.INFO response ([#3596](https://github.com/redis/go-redis/pull/3596)) + +### Connection Pool Improvements +- **Improved Connection Success Rate**: Implemented FIFO queue-based fairness and context pattern for connection creation to prevent premature cancellation under high concurrency ([#3518](https://github.com/redis/go-redis/pull/3518)) +- **Connection State Machine**: Resolved race conditions and improved pool performance with proper state tracking ([#3559](https://github.com/redis/go-redis/pull/3559)) +- **Pool Performance**: Significant performance improvements with faster semaphores, lockless hook manager, and reduced allocations (47-67% faster Get/Put operations) ([#3565](https://github.com/redis/go-redis/pull/3565)) + +### Metrics & Observability +- **Canceled Metric Attribute**: Added 'canceled' metrics attribute to distinguish context cancellation errors from other errors ([#3566](https://github.com/redis/go-redis/pull/3566)) + +## ✨ New Features + +- Typed errors with wrapping support ([#3602](https://github.com/redis/go-redis/pull/3602)) by [@ndyakov](https://github.com/ndyakov) +- CAS/CAD commands (marked as experimental) ([#3583](https://github.com/redis/go-redis/pull/3583), [#3595](https://github.com/redis/go-redis/pull/3595)) by [@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis) +- MSETEX command support ([#3580](https://github.com/redis/go-redis/pull/3580)) by [@ofekshenawa](https://github.com/ofekshenawa) +- XReadGroup CLAIM argument ([#3578](https://github.com/redis/go-redis/pull/3578)) by [@ofekshenawa](https://github.com/ofekshenawa) +- ACL commands: GenPass, Users, WhoAmI ([#3576](https://github.com/redis/go-redis/pull/3576)) by [@destinyoooo](https://github.com/destinyoooo) +- SLOWLOG commands: LEN, RESET ([#3585](https://github.com/redis/go-redis/pull/3585)) by [@destinyoooo](https://github.com/destinyoooo) +- LATENCY commands: LATEST, RESET ([#3584](https://github.com/redis/go-redis/pull/3584)) by [@destinyoooo](https://github.com/destinyoooo) +- Hybrid search command (FT.HYBRID) 
([#3573](https://github.com/redis/go-redis/pull/3573)) by [@htemelski-redis](https://github.com/htemelski-redis) +- Vector range command (VRANGE) ([#3543](https://github.com/redis/go-redis/pull/3543)) by [@cxljs](https://github.com/cxljs) +- Vector-specific attributes in FT.INFO ([#3596](https://github.com/redis/go-redis/pull/3596)) by [@ndyakov](https://github.com/ndyakov) +- Improved connection pool success rate with FIFO queue ([#3518](https://github.com/redis/go-redis/pull/3518)) by [@cyningsun](https://github.com/cyningsun) +- Canceled metrics attribute for context errors ([#3566](https://github.com/redis/go-redis/pull/3566)) by [@pvragov](https://github.com/pvragov) + +## 🐛 Bug Fixes + +- Fixed Failover Client MaintNotificationsConfig ([#3600](https://github.com/redis/go-redis/pull/3600)) by [@ajax16384](https://github.com/ajax16384) +- Fixed ACLGenPass function to use the bit parameter ([#3597](https://github.com/redis/go-redis/pull/3597)) by [@destinyoooo](https://github.com/destinyoooo) +- Return error instead of panic from commands ([#3568](https://github.com/redis/go-redis/pull/3568)) by [@dragneelfps](https://github.com/dragneelfps) +- Safety harness in `joinErrors` to prevent panic ([#3577](https://github.com/redis/go-redis/pull/3577)) by [@manisharma](https://github.com/manisharma) + +## ⚡ Performance + +- Connection state machine with race condition fixes ([#3559](https://github.com/redis/go-redis/pull/3559)) by [@ndyakov](https://github.com/ndyakov) +- Pool performance improvements: 47-67% faster Get/Put, 33% less memory, 50% fewer allocations ([#3565](https://github.com/redis/go-redis/pull/3565)) by [@ndyakov](https://github.com/ndyakov) + +## 🧪 Testing & Infrastructure + +- Updated to Redis 8.4.0 image ([#3603](https://github.com/redis/go-redis/pull/3603)) by [@ndyakov](https://github.com/ndyakov) +- Added Redis 8.4-RC1-pre to CI ([#3572](https://github.com/redis/go-redis/pull/3572)) by [@ndyakov](https://github.com/ndyakov) +- Refactored tests for idiomatic Go ([#3561](https://github.com/redis/go-redis/pull/3561), [#3562](https://github.com/redis/go-redis/pull/3562), [#3563](https://github.com/redis/go-redis/pull/3563)) by [@12ya](https://github.com/12ya) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! + +[@12ya](https://github.com/12ya), [@ajax16384](https://github.com/ajax16384), [@cxljs](https://github.com/cxljs), [@cyningsun](https://github.com/cyningsun), [@destinyoooo](https://github.com/destinyoooo), [@dragneelfps](https://github.com/dragneelfps), [@htemelski-redis](https://github.com/htemelski-redis), [@manisharma](https://github.com/manisharma), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@pvragov](https://github.com/pvragov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.16.0...v9.17.0 + +# 9.16.0 (2025-10-23) + +## 🚀 Highlights + +### Maintenance Notifications Support + +This release introduces comprehensive support for Redis maintenance notifications, enabling applications to handle server maintenance events gracefully. 
The new `maintnotifications` package provides: + +- **RESP3 Push Notifications**: Full support for Redis RESP3 protocol push notifications +- **Connection Handoff**: Automatic connection migration during server maintenance with configurable retry policies and circuit breakers +- **Graceful Degradation**: Configurable timeout relaxation during maintenance windows to prevent false failures +- **Event-Driven Architecture**: Background workers with on-demand scaling for efficient handoff processing +- **Production-Ready**: Comprehensive E2E testing framework and monitoring capabilities + +For detailed usage examples and configuration options, see the [maintenance notifications documentation](maintnotifications/README.md). + +## ✨ New Features + +- **Trace Filtering**: Add support for filtering traces for specific commands, including pipeline operations and dial operations ([#3519](https://github.com/redis/go-redis/pull/3519), [#3550](https://github.com/redis/go-redis/pull/3550)) + - New `TraceCmdFilter` option to selectively trace commands + - Reduces overhead by excluding high-frequency or low-value commands from traces + +## 🐛 Bug Fixes + +- **Pipeline Error Handling**: Fix issue where pipeline repeatedly sets the same error ([#3525](https://github.com/redis/go-redis/pull/3525)) +- **Connection Pool**: Ensure re-authentication does not interfere with connection handoff operations ([#3547](https://github.com/redis/go-redis/pull/3547)) + +## 🔧 Improvements + +- **Hash Commands**: Update hash command implementations ([#3523](https://github.com/redis/go-redis/pull/3523)) +- **OpenTelemetry**: Use `metric.WithAttributeSet` to avoid unnecessary attribute copying in redisotel ([#3552](https://github.com/redis/go-redis/pull/3552)) + +## 📚 Documentation + +- **Cluster Client**: Add explanation for why `MaxRetries` is disabled for `ClusterClient` ([#3551](https://github.com/redis/go-redis/pull/3551)) + +## 🧪 Testing & Infrastructure + +- **E2E Testing**: Upgrade E2E testing framework with improved reliability and coverage ([#3541](https://github.com/redis/go-redis/pull/3541)) +- **Release Process**: Improved resiliency of the release process ([#3530](https://github.com/redis/go-redis/pull/3530)) + +## 📦 Dependencies + +- Bump `rojopolis/spellcheck-github-actions` from 0.51.0 to 0.52.0 ([#3520](https://github.com/redis/go-redis/pull/3520)) +- Bump `github/codeql-action` from 3 to 4 ([#3544](https://github.com/redis/go-redis/pull/3544)) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! + +[@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), [@Sovietaced](https://github.com/Sovietaced), [@Udhayarajan](https://github.com/Udhayarajan), [@boekkooi-impossiblecloud](https://github.com/boekkooi-impossiblecloud), [@Pika-Gopher](https://github.com/Pika-Gopher), [@cxljs](https://github.com/cxljs), [@huiyifyj](https://github.com/huiyifyj), [@omid-h70](https://github.com/omid-h70) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.14.0...v9.16.0 + + +# 9.15.0 was accidentally released. Please use version 9.16.0 instead. + +# 9.15.0-beta.3 (2025-09-26) + +## Highlights +This beta release includes a pre-production version of processing push notifications and hitless upgrades. 
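+
+For context on the pipeline error-handling fix noted above, here is a minimal sketch of how per-command errors are inspected after a pipeline run (the `ctx` and `rdb` values are assumed to exist in the caller):
+
+```go
+import (
+	"context"
+	"log"
+
+	"github.com/redis/go-redis/v9"
+)
+
+func inspectPipelineErrors(ctx context.Context, rdb *redis.Client) {
+	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+		pipe.Set(ctx, "key1", "value1", 0)
+		pipe.Get(ctx, "missing")
+		return nil
+	})
+	// err is the first command error and may be redis.Nil for a missing key;
+	// each command also keeps its own error.
+	if err != nil && err != redis.Nil {
+		log.Printf("pipeline failed: %v", err)
+	}
+	for _, cmd := range cmds {
+		if cmdErr := cmd.Err(); cmdErr != nil && cmdErr != redis.Nil {
+			log.Printf("%s failed: %v", cmd.Name(), cmdErr)
+		}
+	}
+}
+```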
+ +# Changes + +- chore: Update hash_commands.go ([#3523](https://github.com/redis/go-redis/pull/3523)) + +## 🚀 New Features + +- feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418)) + +## 🐛 Bug Fixes + +- fix: pipeline repeatedly sets the error ([#3525](https://github.com/redis/go-redis/pull/3525)) + +## 🧰 Maintenance + +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.51.0 to 0.52.0 ([#3520](https://github.com/redis/go-redis/pull/3520)) +- feat(e2e-testing): maintnotifications e2e and refactor ([#3526](https://github.com/redis/go-redis/pull/3526)) +- feat(tag.sh): Improved resiliency of the release process ([#3530](https://github.com/redis/go-redis/pull/3530)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@cxljs](https://github.com/cxljs), [@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), and [@omid-h70](https://github.com/omid-h70) + + +# 9.15.0-beta.1 (2025-09-10) + +## Highlights +This beta release includes a pre-production version of processing push notifications and hitless upgrades. + +### Hitless Upgrades +Hitless upgrades is a major new feature that allows for zero-downtime upgrades in Redis clusters. +You can find more information in the [Hitless Upgrades documentation](https://github.com/redis/go-redis/tree/master/hitless). + +# Changes + +## 🚀 New Features +- [CAE-1088] & [CAE-1072] feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), [@ofekshenawa](https://github.com/ofekshenawa) + + +# 9.14.0 (2025-09-10) + +## Highlights +- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510)) + +# Changes + +## 🚀 New Features + +- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510)) + +## 🐛 Bug Fixes + +- fix: SetErr on Cmd if the command cannot be queued correctly in multi/exec ([#3509](https://github.com/redis/go-redis/pull/3509)) + +## 🧰 Maintenance + +- Updates release drafter config to exclude dependabot ([#3511](https://github.com/redis/go-redis/pull/3511)) +- chore(deps): bump actions/setup-go from 5 to 6 ([#3504](https://github.com/redis/go-redis/pull/3504)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@elena-kolevska](https://github.com/elena-kolevksa), [@htemelski-redis](https://github.com/htemelski-redis) and [@ndyakov](https://github.com/ndyakov) + + +# 9.13.0 (2025-09-03) + +## Highlights +- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496)) +- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470)) +- Fixes on Read and Write buffer sizes and UniversalOptions + +## Changes +- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496)) +- fix(test): fix a timing issue in pubsub test ([#3498](https://github.com/redis/go-redis/pull/3498)) +- Allow users to enable read-write splitting in failover mode. 
([#3482](https://github.com/redis/go-redis/pull/3482))
+- Set the read/write buffer size of the sentinel client to 4KiB ([#3476](https://github.com/redis/go-redis/pull/3476))
+
+## 🚀 New Features
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- Support subscriptions against cluster slave nodes ([#3480](https://github.com/redis/go-redis/pull/3480))
+- Add wait metrics to otel ([#3493](https://github.com/redis/go-redis/pull/3493))
+- Clean failing timeout implementation ([#3472](https://github.com/redis/go-redis/pull/3472))
+
+## 🐛 Bug Fixes
+
+- Do not assume that all non-IP hosts are loopbacks ([#3085](https://github.com/redis/go-redis/pull/3085))
+- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470))
+
+## 🧰 Maintenance
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- fix(make test): Add default env in makefile ([#3491](https://github.com/redis/go-redis/pull/3491))
+- Update the introduction to running tests in README.md ([#3495](https://github.com/redis/go-redis/pull/3495))
+- test: Add comprehensive edge case tests for IncrByFloat command ([#3477](https://github.com/redis/go-redis/pull/3477))
+- Set the default read/write buffer size of Redis connection to 32KiB ([#3483](https://github.com/redis/go-redis/pull/3483))
+- Bumps test image to 8.2.1-pre ([#3478](https://github.com/redis/go-redis/pull/3478))
+- fix UniversalOptions miss ReadBufferSize and WriteBufferSize options ([#3485](https://github.com/redis/go-redis/pull/3485))
+- chore(deps): bump actions/checkout from 4 to 5 ([#3484](https://github.com/redis/go-redis/pull/3484))
+- Removes dry run for stale issues policy ([#3471](https://github.com/redis/go-redis/pull/3471))
+- Update otel metrics URL ([#3474](https://github.com/redis/go-redis/pull/3474))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@LINKIWI](https://github.com/LINKIWI), [@cxljs](https://github.com/cxljs), [@cybersmeashish](https://github.com/cybersmeashish), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@mwhooker](https://github.com/mwhooker), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@suever](https://github.com/suever)
+
+
+# 9.12.1 (2025-08-11)
+## 🚀 Highlights
+In the last version (9.12.0) the client introduced bigger write and read buffer sizes. The default value we set was 512KiB.
+However, users reported that this is too big for most use cases and can lead to high memory usage.
+In this version the default value is changed to 256KiB. The `README.md` was updated to reflect the
+correct default value and include a note that the default value can be changed.
+
+## 🐛 Bug Fixes
+
+- fix(options): Add buffer sizes to failover. Update README ([#3468](https://github.com/redis/go-redis/pull/3468))
+
+## 🧰 Maintenance
+
+- fix(options): Add buffer sizes to failover. Update README ([#3468](https://github.com/redis/go-redis/pull/3468))
+- chore: update & fix otel example ([#3466](https://github.com/redis/go-redis/pull/3466))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+ +[@ndyakov](https://github.com/ndyakov) and [@vmihailenco](https://github.com/vmihailenco) + +# 9.12.0 (2025-08-05) + +## 🚀 Highlights + +- This release includes support for [Redis 8.2](https://redis.io/docs/latest/operate/oss_and_stack/stack-with-enterprise/release-notes/redisce/redisos-8.2-release-notes/). +- Introduces an experimental Query Builders for `FTSearch`, `FTAggregate` and other search commands. +- Adds support for `EPSILON` option in `FT.VSIM`. +- Includes bug fixes and improvements contributed by the community related to ring and [redisotel](https://github.com/redis/go-redis/tree/master/extra/redisotel). + +## Changes +- Improve stale issue workflow ([#3458](https://github.com/redis/go-redis/pull/3458)) +- chore(ci): Add 8.2 rc2 pre build for CI ([#3459](https://github.com/redis/go-redis/pull/3459)) +- Added new stream commands ([#3450](https://github.com/redis/go-redis/pull/3450)) +- feat: Add "skip_verify" to Sentinel ([#3428](https://github.com/redis/go-redis/pull/3428)) +- fix: `errors.Join` requires Go 1.20 or later ([#3442](https://github.com/redis/go-redis/pull/3442)) +- DOC-4344 document quickstart examples ([#3426](https://github.com/redis/go-redis/pull/3426)) +- feat(bitop): add support for the new bitop operations ([#3409](https://github.com/redis/go-redis/pull/3409)) + +## 🚀 New Features + +- feat: recover addIdleConn may occur panic ([#2445](https://github.com/redis/go-redis/pull/2445)) +- feat(ring): specify custom health check func via HeartbeatFn option ([#2940](https://github.com/redis/go-redis/pull/2940)) +- Add Query Builder for RediSearch commands ([#3436](https://github.com/redis/go-redis/pull/3436)) +- add configurable buffer sizes for Redis connections ([#3453](https://github.com/redis/go-redis/pull/3453)) +- Add VAMANA vector type to RediSearch ([#3449](https://github.com/redis/go-redis/pull/3449)) +- VSIM add `EPSILON` option ([#3454](https://github.com/redis/go-redis/pull/3454)) +- Add closing support to otel metrics instrumentation ([#3444](https://github.com/redis/go-redis/pull/3444)) + +## 🐛 Bug Fixes + +- fix(redisotel): fix buggy append in reportPoolStats ([#3122](https://github.com/redis/go-redis/pull/3122)) +- fix(search): return results even if doc is empty ([#3457](https://github.com/redis/go-redis/pull/3457)) +- [ISSUE-3402]: Ring.Pipelined return dial timeout error ([#3403](https://github.com/redis/go-redis/pull/3403)) + +## 🧰 Maintenance + +- Merges stale issues jobs into one job with two steps ([#3463](https://github.com/redis/go-redis/pull/3463)) +- improve code readability ([#3446](https://github.com/redis/go-redis/pull/3446)) +- chore(release): 9.12.0-beta.1 ([#3460](https://github.com/redis/go-redis/pull/3460)) +- DOC-5472 time series doc examples ([#3443](https://github.com/redis/go-redis/pull/3443)) +- Add VAMANA compression algorithm tests ([#3461](https://github.com/redis/go-redis/pull/3461)) +- bumped redis 8.2 version used in the CI/CD ([#3451](https://github.com/redis/go-redis/pull/3451)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ +[@andy-stark-redis](https://github.com/andy-stark-redis), [@cxljs](https://github.com/cxljs), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@jouir](https://github.com/jouir), [@monkey92t](https://github.com/monkey92t), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@rokn](https://github.com/rokn), [@smnvdev](https://github.com/smnvdev), [@strobil](https://github.com/strobil) and [@wzy9607](https://github.com/wzy9607) + +## New Contributors +* [@htemelski-redis](https://github.com/htemelski-redis) made their first contribution in [#3409](https://github.com/redis/go-redis/pull/3409) +* [@smnvdev](https://github.com/smnvdev) made their first contribution in [#3403](https://github.com/redis/go-redis/pull/3403) +* [@rokn](https://github.com/rokn) made their first contribution in [#3444](https://github.com/redis/go-redis/pull/3444) + +# 9.11.0 (2025-06-24) + +## 🚀 Highlights + +Fixes TxPipeline to work correctly in cluster scenarios, allowing execution of commands +only in the same slot. + +# Changes + +## 🚀 New Features + +- Set cluster slot for `scan` commands, rather than random ([#2623](https://github.com/redis/go-redis/pull/2623)) +- Add CredentialsProvider field to UniversalOptions ([#2927](https://github.com/redis/go-redis/pull/2927)) +- feat(redisotel): add WithCallerEnabled option ([#3415](https://github.com/redis/go-redis/pull/3415)) + +## 🐛 Bug Fixes + +- fix(txpipeline): keyless commands should take the slot of the keyed ([#3411](https://github.com/redis/go-redis/pull/3411)) +- fix(loading): cache the loaded flag for slave nodes ([#3410](https://github.com/redis/go-redis/pull/3410)) +- fix(txpipeline): should return error on multi/exec on multiple slots ([#3408](https://github.com/redis/go-redis/pull/3408)) +- fix: check if the shard exists to avoid returning nil ([#3396](https://github.com/redis/go-redis/pull/3396)) + +## 🧰 Maintenance + +- feat: optimize connection pool waitTurn ([#3412](https://github.com/redis/go-redis/pull/3412)) +- chore(ci): update CI redis builds ([#3407](https://github.com/redis/go-redis/pull/3407)) +- chore: remove a redundant method from `Ring`, `Client` and `ClusterClient` ([#3401](https://github.com/redis/go-redis/pull/3401)) +- test: refactor TestBasicCredentials using table-driven tests ([#3406](https://github.com/redis/go-redis/pull/3406)) +- perf: reduce unnecessary memory allocation operations ([#3399](https://github.com/redis/go-redis/pull/3399)) +- fix: insert entry during iterating over a map ([#3398](https://github.com/redis/go-redis/pull/3398)) +- DOC-5229 probabilistic data type examples ([#3413](https://github.com/redis/go-redis/pull/3413)) +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.49.0 to 0.51.0 ([#3414](https://github.com/redis/go-redis/pull/3414)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ +[@andy-stark-redis](https://github.com/andy-stark-redis), [@boekkooi-impossiblecloud](https://github.com/boekkooi-impossiblecloud), [@cxljs](https://github.com/cxljs), [@dcherubini](https://github.com/dcherubini), [@dependabot[bot]](https://github.com/apps/dependabot), [@iamamirsalehi](https://github.com/iamamirsalehi), [@ndyakov](https://github.com/ndyakov), [@pete-woods](https://github.com/pete-woods), [@twz915](https://github.com/twz915) and [dependabot[bot]](https://github.com/apps/dependabot) + +# 9.10.0 (2025-06-06) + +## 🚀 Highlights + +`go-redis` now supports [vector sets](https://redis.io/docs/latest/develop/data-types/vector-sets/). This data type is marked +as "in preview" in Redis and its support in `go-redis` is marked as experimental. You can find examples in the documentation and +in the `doctests` folder. + +# Changes + +## 🚀 New Features + +- feat: support vectorset ([#3375](https://github.com/redis/go-redis/pull/3375)) + +## 🧰 Maintenance + +- Add the missing NewFloatSliceResult for testing ([#3393](https://github.com/redis/go-redis/pull/3393)) +- DOC-5078 vector set examples ([#3394](https://github.com/redis/go-redis/pull/3394)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@AndBobsYourUncle](https://github.com/AndBobsYourUncle), [@andy-stark-redis](https://github.com/andy-stark-redis), [@fukua95](https://github.com/fukua95) and [@ndyakov](https://github.com/ndyakov) + + + +# 9.9.0 (2025-05-27) + +## 🚀 Highlights +- **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental) + - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication +- **Connection Statistics**: Added connection waiting statistics for better monitoring +- **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration +- **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management + +## ✨ New Features +- Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320)) + - Supports dynamic credential updates + - Includes connection close hooks + - Note: Currently marked as experimental +- Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362)) +- Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804)) +- Added new utility functions: + - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371)) + - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377)) +- Added Ring client shard access methods: + - `GetShardClients()` to retrieve all active shard clients + - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388)) + +## 🐛 Bug Fixes +- Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370)) +- Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Fixed pool acquisition timeout issues ([#3381](https://github.com/redis/go-redis/pull/3381)) +- Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376)) + +## 📚 Documentation +- Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Added 
package-level comments for new features + +## ⚡ Performance and Reliability +- Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383)) +- Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387)) +- Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380)) + +## 🔧 Dependencies and Infrastructure +- Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372)) +- Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389)) +- Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384)) + +## 🧪 Testing +- Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381)) +- Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), [@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), [@DengY11](https://github.com/DengY11) + +## 📝 Changelog + +For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0). + +# 9.8.0 (2025-04-30) + +## 🚀 Highlights +- **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration +- **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command +- **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search` + +## ✨ New Features +- Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305)) +- Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843)) +- Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182)) +- Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273)) +- Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794)) +- Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255)) +- Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294)) +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281)) +- Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342)) +- Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363)) + +## 🐛 Bug Fixes +- Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360)) +- Fixed panic caused when argument is `nil` ([#3353](https://github.com/redis/go-redis/pull/3353)) +- Improved error handling 
when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349)) +- Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298)) +- Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190)) +- Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164)) +- Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290)) +- Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168)) +- Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344)) +- Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327)) +- Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329)) + +## 📚 Documentation +- Added hash search examples ([#3357](https://github.com/redis/go-redis/pull/3357)) +- Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351)) +- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345)) +- Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234)) +- Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242)) +- Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331)) +- Updated `HExpire` command documentation ([#3355](https://github.com/redis/go-redis/pull/3355)) +- Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316)) +- Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55)) + +## ⚡ Performance and Reliability +- Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089)) +- Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334)) + +## 🔧 Dependencies and Infrastructure +- Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361)) +- Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274)) +- Updated various dependencies: + - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354)) + - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336)) + - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308)) +- Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354)) + +## ⚠️ Breaking Changes +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321)) +- Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323)) + +## 🔒 Security +- Fixed network error handling on SETINFO (CVE-2025-29923) 
([#3295](https://github.com/redis/go-redis/pull/3295)) + +## 🧪 Testing +- Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337)) +- Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299)) +- Added test codes for search_commands.go ([#3285](https://github.com/redis/go-redis/pull/3285)) +- Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), [@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), [@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi) + + +# Old Changelog +## Unreleased + +### Changed + +* `go-redis` won't skip span creation if the parent spans is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980)) + Users can use the OpenTelemetry sampler to control the sampling behavior. + For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep + a similar behavior (drop orphan spans) of `go-redis` as before. 
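+
+For illustration, a minimal sketch of configuring that sampler (assuming the application wires up its own global tracer provider):
+
+```go
+import (
+	"go.opentelemetry.io/otel"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func initTracing() {
+	// Sample go-redis spans only when the parent span is sampled;
+	// root (orphan) spans are dropped, approximating the old behavior.
+	tp := sdktrace.NewTracerProvider(
+		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.NeverSample())),
+	)
+	otel.SetTracerProvider(tp)
+}
+```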
+ +## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) + + +### Features + +* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) +* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) +* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) + + + +## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) + + +### Bug Fixes + +* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) + + +### Features + +* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) +* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) +* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) + + + +## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) + +### New Features + +- feat(scan): scan time.Time sets the default decoding (#2413) +- Add support for CLUSTER LINKS command (#2504) +- Add support for acl dryrun command (#2502) +- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) +- Add support for LCS Command (#2480) +- Add support for BZMPOP (#2456) +- Adding support for ZMPOP command (#2408) +- Add support for LMPOP (#2440) +- feat: remove pool unused fields (#2438) +- Expiretime and PExpireTime (#2426) +- Implement `FUNCTION` group of commands (#2475) +- feat(zadd): add ZAddLT and ZAddGT (#2429) +- Add: Support for COMMAND LIST command (#2491) +- Add support for BLMPOP (#2442) +- feat: check pipeline.Do to prevent confusion with Exec (#2517) +- Function stats, function kill, fcall and fcall_ro (#2486) +- feat: Add support for CLUSTER SHARDS command (#2507) +- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) + +### Fixed + +- fix: eval api cmd.SetFirstKeyPos (#2501) +- fix: limit the number of connections created (#2441) +- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) +- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) +- fix: group lag can be null (#2448) + +### Maintenance + +- Updating to the latest version of redis (#2508) +- Allowing for running tests on a port other than the fixed 6380 (#2466) +- redis 7.0.8 in tests (#2450) +- docs: Update redisotel example for v9 (#2425) +- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) +- chore: add Chinese translation (#2436) +- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) +- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420) +- chore(deps): bump actions/setup-go from 3 to 4 (#2495) +- docs: add instructions for the HSet api (#2503) +- docs: add reading lag field comment 
(#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+  contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+  and deadlines. See
+  [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+  `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+- Added `redis.HasErrorPrefix` to help working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+  completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+  [example](example/otel) and
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+  an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+  it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+  reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usual, added support for more commands and fixed some bugs.
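+
+As a usage sketch for the `ParseClusterURL` helper mentioned above (the URL is the example from these notes; credentials and addresses are placeholders):
+
+```go
+import "github.com/redis/go-redis/v9"
+
+func newClusterFromURL() (*redis.ClusterClient, error) {
+	// ParseClusterURL turns a cluster URL, including extra addr parameters,
+	// into a *redis.ClusterOptions.
+	opts, err := redis.ParseClusterURL(
+		"redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791",
+	)
+	if err != nil {
+		return nil, err
+	}
+	return redis.NewClusterClient(opts), nil
+}
+```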
diff --git a/vendor/github.com/redis/go-redis/v9/acl_commands.go b/vendor/github.com/redis/go-redis/v9/acl_commands.go index 06847be2e..0a8a195ce 100644 --- a/vendor/github.com/redis/go-redis/v9/acl_commands.go +++ b/vendor/github.com/redis/go-redis/v9/acl_commands.go @@ -4,8 +4,24 @@ import "context" type ACLCmdable interface { ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd + ACLLog(ctx context.Context, count int64) *ACLLogCmd ACLLogReset(ctx context.Context) *StatusCmd + + ACLGenPass(ctx context.Context, bit int) *StringCmd + + ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd + ACLDelUser(ctx context.Context, username string) *IntCmd + ACLUsers(ctx context.Context) *StringSliceCmd + ACLWhoAmI(ctx context.Context) *StringCmd + ACLList(ctx context.Context) *StringSliceCmd + + ACLCat(ctx context.Context) *StringSliceCmd + ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd +} + +type ACLCatArgs struct { + Category string } func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd { @@ -33,3 +49,68 @@ func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd { _ = c(ctx, cmd) return cmd } + +func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd { + cmd := NewIntCmd(ctx, "acl", "deluser", username) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd { + args := make([]interface{}, 3+len(rules)) + args[0] = "acl" + args[1] = "setuser" + args[2] = username + for i, rule := range rules { + args[i+3] = rule + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLGenPass(ctx context.Context, bit int) *StringCmd { + args := make([]interface{}, 0, 3) + args = append(args, "acl", "genpass") + if bit > 0 { + args = append(args, bit) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLUsers(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "users") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLWhoAmI(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "acl", "whoami") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "list") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "cat") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd { + // if there is a category passed, build new cmd, if there isn't - use the ACLCat method + if options != nil && options.Category != "" { + cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category) + _ = c(ctx, cmd) + return cmd + } + + return c.ACLCat(ctx) +} diff --git a/vendor/github.com/redis/go-redis/v9/adapters.go b/vendor/github.com/redis/go-redis/v9/adapters.go new file mode 100644 index 000000000..4146153bf --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/adapters.go @@ -0,0 +1,111 @@ +package redis + +import ( + "context" + "errors" + "net" + "time" + + "github.com/redis/go-redis/v9/internal/interfaces" + "github.com/redis/go-redis/v9/push" +) + +// ErrInvalidCommand is returned when an invalid command is passed to ExecuteCommand. 
+var ErrInvalidCommand = errors.New("invalid command type") + +// ErrInvalidPool is returned when the pool type is not supported. +var ErrInvalidPool = errors.New("invalid pool type") + +// newClientAdapter creates a new client adapter for regular Redis clients. +func newClientAdapter(client *baseClient) interfaces.ClientInterface { + return &clientAdapter{client: client} +} + +// clientAdapter adapts a Redis client to implement interfaces.ClientInterface. +type clientAdapter struct { + client *baseClient +} + +// GetOptions returns the client options. +func (ca *clientAdapter) GetOptions() interfaces.OptionsInterface { + return &optionsAdapter{options: ca.client.opt} +} + +// GetPushProcessor returns the client's push notification processor. +func (ca *clientAdapter) GetPushProcessor() interfaces.NotificationProcessor { + return &pushProcessorAdapter{processor: ca.client.pushProcessor} +} + +// optionsAdapter adapts Redis options to implement interfaces.OptionsInterface. +type optionsAdapter struct { + options *Options +} + +// GetReadTimeout returns the read timeout. +func (oa *optionsAdapter) GetReadTimeout() time.Duration { + return oa.options.ReadTimeout +} + +// GetWriteTimeout returns the write timeout. +func (oa *optionsAdapter) GetWriteTimeout() time.Duration { + return oa.options.WriteTimeout +} + +// GetNetwork returns the network type. +func (oa *optionsAdapter) GetNetwork() string { + return oa.options.Network +} + +// GetAddr returns the connection address. +func (oa *optionsAdapter) GetAddr() string { + return oa.options.Addr +} + +// IsTLSEnabled returns true if TLS is enabled. +func (oa *optionsAdapter) IsTLSEnabled() bool { + return oa.options.TLSConfig != nil +} + +// GetProtocol returns the protocol version. +func (oa *optionsAdapter) GetProtocol() int { + return oa.options.Protocol +} + +// GetPoolSize returns the connection pool size. +func (oa *optionsAdapter) GetPoolSize() int { + return oa.options.PoolSize +} + +// NewDialer returns a new dialer function for the connection. +func (oa *optionsAdapter) NewDialer() func(context.Context) (net.Conn, error) { + baseDialer := oa.options.NewDialer() + return func(ctx context.Context) (net.Conn, error) { + // Extract network and address from the options + network := oa.options.Network + addr := oa.options.Addr + return baseDialer(ctx, network, addr) + } +} + +// pushProcessorAdapter adapts a push.NotificationProcessor to implement interfaces.NotificationProcessor. +type pushProcessorAdapter struct { + processor push.NotificationProcessor +} + +// RegisterHandler registers a handler for a specific push notification name. +func (ppa *pushProcessorAdapter) RegisterHandler(pushNotificationName string, handler interface{}, protected bool) error { + if pushHandler, ok := handler.(push.NotificationHandler); ok { + return ppa.processor.RegisterHandler(pushNotificationName, pushHandler, protected) + } + return errors.New("handler must implement push.NotificationHandler") +} + +// UnregisterHandler removes a handler for a specific push notification name. +func (ppa *pushProcessorAdapter) UnregisterHandler(pushNotificationName string) error { + return ppa.processor.UnregisterHandler(pushNotificationName) +} + +// GetHandler returns the handler for a specific push notification name. 
+func (ppa *pushProcessorAdapter) GetHandler(pushNotificationName string) interface{} { + return ppa.processor.GetHandler(pushNotificationName) +} diff --git a/vendor/github.com/redis/go-redis/v9/auth/auth.go b/vendor/github.com/redis/go-redis/v9/auth/auth.go new file mode 100644 index 000000000..1f5c80224 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/auth/auth.go @@ -0,0 +1,61 @@ +// Package auth package provides authentication-related interfaces and types. +// It also includes a basic implementation of credentials using username and password. +package auth + +// StreamingCredentialsProvider is an interface that defines the methods for a streaming credentials provider. +// It is used to provide credentials for authentication. +// The CredentialsListener is used to receive updates when the credentials change. +type StreamingCredentialsProvider interface { + // Subscribe subscribes to the credentials provider for updates. + // It returns the current credentials, a cancel function to unsubscribe from the provider, + // and an error if any. + // TODO(ndyakov): Should we add context to the Subscribe method? + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} + +// UnsubscribeFunc is a function that is used to cancel the subscription to the credentials provider. +// It is used to unsubscribe from the provider when the credentials are no longer needed. +type UnsubscribeFunc func() error + +// CredentialsListener is an interface that defines the methods for a credentials listener. +// It is used to receive updates when the credentials change. +// The OnNext method is called when the credentials change. +// The OnError method is called when an error occurs while requesting the credentials. +type CredentialsListener interface { + OnNext(credentials Credentials) + OnError(err error) +} + +// Credentials is an interface that defines the methods for credentials. +// It is used to provide the credentials for authentication. +type Credentials interface { + // BasicAuth returns the username and password for basic authentication. + BasicAuth() (username string, password string) + // RawCredentials returns the raw credentials as a string. + // This can be used to extract the username and password from the raw credentials or + // additional information if present in the token. + RawCredentials() string +} + +type basicAuth struct { + username string + password string +} + +// RawCredentials returns the raw credentials as a string. +func (b *basicAuth) RawCredentials() string { + return b.username + ":" + b.password +} + +// BasicAuth returns the username and password for basic authentication. +func (b *basicAuth) BasicAuth() (username string, password string) { + return b.username, b.password +} + +// NewBasicCredentials creates a new Credentials object from the given username and password. +func NewBasicCredentials(username, password string) Credentials { + return &basicAuth{ + username: username, + password: password, + } +} diff --git a/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go new file mode 100644 index 000000000..f4b319838 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go @@ -0,0 +1,47 @@ +package auth + +// ReAuthCredentialsListener is a struct that implements the CredentialsListener interface. +// It is used to re-authenticate the credentials when they are updated. 
+// It contains: +// - reAuth: a function that takes the new credentials and returns an error if any. +// - onErr: a function that takes an error and handles it. +type ReAuthCredentialsListener struct { + reAuth func(credentials Credentials) error + onErr func(err error) +} + +// OnNext is called when the credentials are updated. +// It calls the reAuth function with the new credentials. +// If the reAuth function returns an error, it calls the onErr function with the error. +func (c *ReAuthCredentialsListener) OnNext(credentials Credentials) { + if c.reAuth == nil { + return + } + + err := c.reAuth(credentials) + if err != nil { + c.OnError(err) + } +} + +// OnError is called when an error occurs. +// It can be called from both the credentials provider and the reAuth function. +func (c *ReAuthCredentialsListener) OnError(err error) { + if c.onErr == nil { + return + } + + c.onErr(err) +} + +// NewReAuthCredentialsListener creates a new ReAuthCredentialsListener. +// Implements the auth.CredentialsListener interface. +func NewReAuthCredentialsListener(reAuth func(credentials Credentials) error, onErr func(err error)) *ReAuthCredentialsListener { + return &ReAuthCredentialsListener{ + reAuth: reAuth, + onErr: onErr, + } +} + +// Ensure ReAuthCredentialsListener implements the CredentialsListener interface. +var _ CredentialsListener = (*ReAuthCredentialsListener)(nil) \ No newline at end of file diff --git a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go index a21558289..86aa9b7ef 100644 --- a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go +++ b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go @@ -12,6 +12,10 @@ type BitMapCmdable interface { BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpNot(ctx context.Context, destKey string, key string) *IntCmd BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd @@ -78,22 +82,50 @@ func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) return cmd } +// BitOpAnd creates a new bitmap in which users are members of all given bitmaps func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "and", destKey, keys...) } +// BitOpOr creates a new bitmap in which users are member of at least one given bitmap func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "or", destKey, keys...) } +// BitOpXor creates a new bitmap in which users are the result of XORing all given bitmaps func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "xor", destKey, keys...) 
} +// BitOpNot creates a new bitmap in which users are not members of a given bitmap func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { return c.bitOp(ctx, "not", destKey, key) } +// BitOpDiff creates a new bitmap in which users are members of bitmap X but not of any of bitmaps Y1, Y2, … +// Introduced with Redis 8.2 +func (c cmdable) BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "diff", destKey, keys...) +} + +// BitOpDiff1 creates a new bitmap in which users are members of one or more of bitmaps Y1, Y2, … but not members of bitmap X +// Introduced with Redis 8.2 +func (c cmdable) BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "diff1", destKey, keys...) +} + +// BitOpAndOr creates a new bitmap in which users are members of bitmap X and also members of one or more of bitmaps Y1, Y2, … +// Introduced with Redis 8.2 +func (c cmdable) BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "andor", destKey, keys...) +} + +// BitOpOne creates a new bitmap in which users are members of exactly one of the given bitmaps +// Introduced with Redis 8.2 +func (c cmdable) BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "one", destKey, keys...) +} + // BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end // if you need the `byte | bit` parameter, please use `BitPosSpan`. func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { @@ -109,7 +141,9 @@ func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64 args[3] = pos[0] args[4] = pos[1] default: - panic("too many arguments") + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewIntCmd(ctx, args...) 
_ = c(ctx, cmd) @@ -150,7 +184,9 @@ func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface args[0] = "BITFIELD_RO" args[1] = key if len(values)%2 != 0 { - panic("BitFieldRO: invalid number of arguments, must be even") + c := NewIntSliceCmd(ctx) + c.SetErr(errors.New("BitFieldRO: invalid number of arguments, must be even")) + return c } for i := 0; i < len(values); i += 2 { args = append(args, "GET", values[i], values[i+1]) diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go index 0caf0977a..4857b01ea 100644 --- a/vendor/github.com/redis/go-redis/v9/cluster_commands.go +++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go @@ -4,6 +4,7 @@ import "context" type ClusterCmdable interface { ClusterMyShardID(ctx context.Context) *StringCmd + ClusterMyID(ctx context.Context) *StringCmd ClusterSlots(ctx context.Context) *ClusterSlotsCmd ClusterShards(ctx context.Context) *ClusterShardsCmd ClusterLinks(ctx context.Context) *ClusterLinksCmd @@ -35,6 +36,12 @@ func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd { return cmd } +func (c cmdable) ClusterMyID(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "cluster", "myid") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd { cmd := NewClusterSlotsCmd(ctx, "cluster", "slots") _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index 59ba08969..2dbc2ad87 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -17,6 +17,56 @@ import ( "github.com/redis/go-redis/v9/internal/util" ) +// keylessCommands contains Redis commands that have empty key specifications (9th slot empty) +// Only includes core Redis commands, excludes FT.*, ts.*, timeseries.*, search.* and subcommands +var keylessCommands = map[string]struct{}{ + "acl": {}, + "asking": {}, + "auth": {}, + "bgrewriteaof": {}, + "bgsave": {}, + "client": {}, + "cluster": {}, + "config": {}, + "debug": {}, + "discard": {}, + "echo": {}, + "exec": {}, + "failover": {}, + "function": {}, + "hello": {}, + "latency": {}, + "lolwut": {}, + "module": {}, + "monitor": {}, + "multi": {}, + "pfselftest": {}, + "ping": {}, + "psubscribe": {}, + "psync": {}, + "publish": {}, + "pubsub": {}, + "punsubscribe": {}, + "quit": {}, + "readonly": {}, + "readwrite": {}, + "replconf": {}, + "replicaof": {}, + "role": {}, + "save": {}, + "script": {}, + "select": {}, + "shutdown": {}, + "slaveof": {}, + "slowlog": {}, + "subscribe": {}, + "swapdb": {}, + "sync": {}, + "unsubscribe": {}, + "unwatch": {}, + "wait": {}, +} + type Cmder interface { // command name. // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster". @@ -40,7 +90,7 @@ type Cmder interface { readTimeout() *time.Duration readReply(rd *proto.Reader) error - + readRawReply(rd *proto.Reader) error SetErr(error) Err() error } @@ -75,12 +125,22 @@ func writeCmd(wr *proto.Writer, cmd Cmder) error { return wr.WriteArgs(cmd.Args()) } +// cmdFirstKeyPos returns the position of the first key in the command's arguments. +// If the command does not have a key, it returns 0. +// TODO: Use the data in CommandInfo to determine the first key position. 
func cmdFirstKeyPos(cmd Cmder) int { if pos := cmd.firstKeyPos(); pos != 0 { return int(pos) } - switch cmd.Name() { + name := cmd.Name() + + // first check if the command is keyless + if _, ok := keylessCommands[name]; ok { + return 0 + } + + switch name { case "eval", "evalsha", "eval_ro", "evalsha_ro": if cmd.stringArg(2) != "0" { return 3 @@ -122,11 +182,11 @@ func cmdString(cmd Cmder, val interface{}) string { //------------------------------------------------------------------------------ type baseCmd struct { - ctx context.Context - args []interface{} - err error - keyPos int8 - + ctx context.Context + args []interface{} + err error + keyPos int8 + rawVal interface{} _readTimeout *time.Duration } @@ -167,6 +227,8 @@ func (cmd *baseCmd) stringArg(pos int) string { switch v := arg.(type) { case string: return v + case []byte: + return string(v) default: // TODO: consider using appendArg return fmt.Sprint(v) @@ -197,6 +259,11 @@ func (cmd *baseCmd) setReadTimeout(d time.Duration) { cmd._readTimeout = &d } +func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) { + cmd.rawVal, err = rd.ReadReply() + return err +} + //------------------------------------------------------------------------------ type Cmd struct { @@ -632,6 +699,68 @@ func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { //------------------------------------------------------------------------------ +// DigestCmd is a command that returns a uint64 xxh3 hash digest. +// +// This command is specifically designed for the Redis DIGEST command, +// which returns the xxh3 hash of a key's value as a hex string. +// The hex string is automatically parsed to a uint64 value. +// +// The digest can be used for optimistic locking with SetIFDEQ, SetIFDNE, +// and DelExArgs commands. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. 
See https://redis.io/commands/digest/ +type DigestCmd struct { + baseCmd + + val uint64 +} + +var _ Cmder = (*DigestCmd)(nil) + +func NewDigestCmd(ctx context.Context, args ...interface{}) *DigestCmd { + return &DigestCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *DigestCmd) SetVal(val uint64) { + cmd.val = val +} + +func (cmd *DigestCmd) Val() uint64 { + return cmd.val +} + +func (cmd *DigestCmd) Result() (uint64, error) { + return cmd.val, cmd.err +} + +func (cmd *DigestCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DigestCmd) readReply(rd *proto.Reader) (err error) { + // Redis DIGEST command returns a hex string (e.g., "a1b2c3d4e5f67890") + // We parse it as a uint64 xxh3 hash value + var hexStr string + hexStr, err = rd.ReadString() + if err != nil { + return err + } + + // Parse hex string to uint64 + cmd.val, err = strconv.ParseUint(hexStr, 16, 64) + return err +} + +//------------------------------------------------------------------------------ + type IntSliceCmd struct { baseCmd @@ -1398,27 +1527,64 @@ func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} { } func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + readType, err := rd.PeekReplyType() if err != nil { return err } - cmd.val = make(map[string][]interface{}, n) - for i := 0; i < n; i++ { - k, err := rd.ReadString() + + cmd.val = make(map[string][]interface{}) + + switch readType { + case proto.RespMap: + n, err := rd.ReadMapLen() if err != nil { return err } - nn, err := rd.ReadArrayLen() + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[k] = make([]interface{}, nn) + for j := 0; j < nn; j++ { + value, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[k][j] = value + } + } + case proto.RespArray: + // RESP2 response + n, err := rd.ReadArrayLen() if err != nil { return err } - cmd.val[k] = make([]interface{}, nn) - for j := 0; j < nn; j++ { - value, err := rd.ReadReply() + + for i := 0; i < n; i++ { + // Each entry in this array is itself an array with key details + itemLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + + key, err := rd.ReadString() if err != nil { return err } - cmd.val[k][j] = value + cmd.val[key] = make([]interface{}, 0, itemLen-1) + for j := 1; j < itemLen; j++ { + // Read the inner array for timestamp-value pairs + data, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[key] = append(cmd.val[key], data) + } } } @@ -1482,6 +1648,12 @@ func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { type XMessage struct { ID string Values map[string]interface{} + // MillisElapsedFromDelivery is the number of milliseconds since the entry was last delivered. + // Only populated when using XREADGROUP with CLAIM argument for claimed entries. + MillisElapsedFromDelivery int64 + // DeliveredCount is the number of times the entry was delivered. + // Only populated when using XREADGROUP with CLAIM argument for claimed entries. 
+ DeliveredCount int64 } type XMessageSliceCmd struct { @@ -1538,10 +1710,16 @@ func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { } func readXMessage(rd *proto.Reader) (XMessage, error) { - if err := rd.ReadFixedArrayLen(2); err != nil { + // Read array length can be 2 or 4 (with CLAIM metadata) + n, err := rd.ReadArrayLen() + if err != nil { return XMessage{}, err } + if n != 2 && n != 4 { + return XMessage{}, fmt.Errorf("redis: got %d elements in the XMessage array, expected 2 or 4", n) + } + id, err := rd.ReadString() if err != nil { return XMessage{}, err @@ -1554,10 +1732,24 @@ func readXMessage(rd *proto.Reader) (XMessage, error) { } } - return XMessage{ + msg := XMessage{ ID: id, Values: v, - }, nil + } + + if n == 4 { + msg.MillisElapsedFromDelivery, err = rd.ReadInt() + if err != nil { + return XMessage{}, err + } + + msg.DeliveredCount, err = rd.ReadInt() + if err != nil { + return XMessage{}, err + } + } + + return msg, nil } func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) { @@ -2060,7 +2252,9 @@ type XInfoGroup struct { Pending int64 LastDeliveredID string EntriesRead int64 - Lag int64 + // Lag represents the number of pending messages in the stream not yet + // delivered to this consumer group. Returns -1 when the lag cannot be determined. + Lag int64 } var _ Cmder = (*XInfoGroupsCmd)(nil) @@ -2143,8 +2337,11 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { // lag: the number of entries in the stream that are still waiting to be delivered // to the group's consumers, or a NULL(Nil) when that number can't be determined. + // In that case, we return -1. if err != nil && err != Nil { return err + } else if err == Nil { + group.Lag = -1 } default: return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) @@ -3535,15 +3732,14 @@ func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error return err } + lowerCmds := make(map[string]*CommandInfo, len(cmds)) + // Extensions have cmd names in upper case. Convert them to lower case. 
for k, v := range cmds { - lower := internal.ToLower(k) - if lower != k { - cmds[lower] = v - } + lowerCmds[internal.ToLower(k)] = v } - c.cmds = cmds + c.cmds = lowerCmds return nil }) return c.cmds, err @@ -3661,6 +3857,83 @@ func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { //----------------------------------------------------------------------- +type Latency struct { + Name string + Time time.Time + Latest time.Duration + Max time.Duration +} + +type LatencyCmd struct { + baseCmd + val []Latency +} + +var _ Cmder = (*LatencyCmd)(nil) + +func NewLatencyCmd(ctx context.Context, args ...interface{}) *LatencyCmd { + return &LatencyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *LatencyCmd) SetVal(val []Latency) { + cmd.val = val +} + +func (cmd *LatencyCmd) Val() []Latency { + return cmd.val +} + +func (cmd *LatencyCmd) Result() ([]Latency, error) { + return cmd.val, cmd.err +} + +func (cmd *LatencyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *LatencyCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]Latency, n) + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 3 { + return fmt.Errorf("redis: got %d elements in latency get, expected at least 3", nn) + } + if cmd.val[i].Name, err = rd.ReadString(); err != nil { + return err + } + createdAt, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Time = time.Unix(createdAt, 0) + latest, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Latest = time.Duration(latest) * time.Millisecond + maximum, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Max = time.Duration(maximum) * time.Millisecond + } + return nil +} + +//----------------------------------------------------------------------- + type MapStringInterfaceCmd struct { baseCmd @@ -3787,6 +4060,84 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { return nil } +// ----------------------------------------------------------------------- + +// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}. 
+type MapMapStringInterfaceCmd struct { + baseCmd + val map[string]interface{} +} + +func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd { + return &MapMapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapMapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3 +func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadReply() + if err != nil { + return err + } + resultMap := map[string]interface{}{} + + switch midResponse := data.(type) { + case map[interface{}]interface{}: // resp3 will return map + for k, v := range midResponse { + stringKey, ok := k.(string) + if !ok { + return fmt.Errorf("redis: invalid map key %#v", k) + } + resultMap[stringKey] = v + } + case []interface{}: // resp2 will return array of arrays + n := len(midResponse) + for i := 0; i < n; i++ { + finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map + if !ok { + return fmt.Errorf("redis: unexpected response %#v", data) + } + m := len(finalArr) + if m%2 != 0 { // since this should be map, keys should be even number + return fmt.Errorf("redis: unexpected response %#v", data) + } + + for j := 0; j < m; j += 2 { + stringKey, ok := finalArr[j].(string) // the first one + if !ok { + return fmt.Errorf("redis: invalid map key %#v", finalArr[i]) + } + resultMap[stringKey] = finalArr[j+1] // second one is value + } + } + default: + return fmt.Errorf("redis: unexpected response %#v", data) + } + + cmd.val = resultMap + return nil +} + //----------------------------------------------------------------------- type MapStringInterfaceSliceCmd struct { @@ -5012,6 +5363,10 @@ type ClientInfo struct { OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) OutputMemory int // omem, output buffer memory usage TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + TotalNetIn int // tot-net-in, total network input + TotalNetOut int // tot-net-out, total network output + TotalCmds int // tot-cmds, total number of commands processed + IoThread int // io-thread id Events string // file descriptor events (see below) LastCmd string // cmd, last command played User string // the authenticated username of the client @@ -5176,6 +5531,12 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.OutputMemory, err = strconv.Atoi(val) case "tot-mem": info.TotalMemory, err = strconv.Atoi(val) + case "tot-net-in": + info.TotalNetIn, err = strconv.Atoi(val) + case "tot-net-out": + info.TotalNetOut, err = strconv.Atoi(val) + case "tot-cmds": + info.TotalCmds, err = strconv.Atoi(val) case "events": info.Events = val case "cmd": @@ -5190,6 +5551,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.LibName = val case "lib-ver": info.LibVer = val + case "io-thread": + info.IoThread, err = strconv.Atoi(val) default: return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) } @@ -5369,8 +5732,6 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { 
section := "" scanner := bufio.NewScanner(strings.NewReader(val)) - moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) - for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { @@ -5381,6 +5742,7 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { cmd.val[section] = make(map[string]string) } else if line != "" { if section == "Modules" { + moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) kv := moduleRe.FindStringSubmatch(line) if len(kv) == 3 { cmd.val[section][kv[1]] = kv[2] @@ -5491,3 +5853,59 @@ func (cmd *MonitorCmd) Stop() { defer cmd.mu.Unlock() cmd.status = monitorStatusStop } + +type VectorScoreSliceCmd struct { + baseCmd + + val []VectorScore +} + +var _ Cmder = (*VectorScoreSliceCmd)(nil) + +func NewVectorInfoSliceCmd(ctx context.Context, args ...any) *VectorScoreSliceCmd { + return &VectorScoreSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *VectorScoreSliceCmd) SetVal(val []VectorScore) { + cmd.val = val +} + +func (cmd *VectorScoreSliceCmd) Val() []VectorScore { + return cmd.val +} + +func (cmd *VectorScoreSliceCmd) Result() ([]VectorScore, error) { + return cmd.val, cmd.err +} + +func (cmd *VectorScoreSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *VectorScoreSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make([]VectorScore, n) + for i := 0; i < n; i++ { + name, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[i].Name = name + + score, err := rd.ReadFloat() + if err != nil { + return err + } + cmd.val[i].Score = score + } + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go index db5959446..daee5505e 100644 --- a/vendor/github.com/redis/go-redis/v9/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -81,6 +81,8 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} { return dst case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: return append(dst, arg) + case nil: + return dst default: // scan struct field v := reflect.ValueOf(arg) @@ -153,6 +155,12 @@ func isEmptyValue(v reflect.Value) bool { return v.Float() == 0 case reflect.Interface, reflect.Pointer: return v.IsNil() + case reflect.Struct: + if v.Type() == reflect.TypeOf(time.Time{}) { + return v.IsZero() + } + // Only supports the struct time.Time, + // subsequent iterations will follow the func Scan support decoder. 
} return false } @@ -185,6 +193,7 @@ type Cmdable interface { ClientID(ctx context.Context) *IntCmd ClientUnblock(ctx context.Context, id int64) *IntCmd ClientUnblockWithError(ctx context.Context, id int64) *IntCmd + ClientMaintNotifications(ctx context.Context, enabled bool, endpointType string) *StatusCmd ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd ConfigResetStat(ctx context.Context) *StatusCmd ConfigSet(ctx context.Context, parameter, value string) *StatusCmd @@ -202,16 +211,19 @@ type Cmdable interface { ShutdownNoSave(ctx context.Context) *StatusCmd SlaveOf(ctx context.Context, host, port string) *StatusCmd SlowLogGet(ctx context.Context, num int64) *SlowLogCmd + SlowLogLen(ctx context.Context) *IntCmd + SlowLogReset(ctx context.Context) *StatusCmd Time(ctx context.Context) *TimeCmd DebugObject(ctx context.Context, key string) *StringCmd MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + Latency(ctx context.Context) *LatencyCmd + LatencyReset(ctx context.Context, events ...interface{}) *StatusCmd ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd ACLCmdable BitMapCmdable ClusterCmdable - GearsCmdable GenericCmdable GeoCmdable HashCmdable @@ -220,12 +232,14 @@ type Cmdable interface { ProbabilisticCmdable PubSubCmdable ScriptingFunctionsCmdable + SearchCmdable SetCmdable SortedSetCmdable StringCmdable StreamCmdable TimeseriesCmdable JSONCmdable + VectorSetCmdable } type StatefulCmdable interface { @@ -244,6 +258,7 @@ var ( _ Cmdable = (*Tx)(nil) _ Cmdable = (*Ring)(nil) _ Cmdable = (*ClusterClient)(nil) + _ Cmdable = (*Pipeline)(nil) ) type cmdable func(ctx context.Context, cmd Cmder) error @@ -330,7 +345,7 @@ func (info LibraryInfo) Validate() error { return nil } -// Hello Set the resp protocol used. +// Hello sets the resp protocol used. func (c statefulCmdable) Hello(ctx context.Context, ver int, username, password, clientName string, ) *MapStringInterfaceCmd { @@ -422,6 +437,12 @@ func (c cmdable) Ping(ctx context.Context) *StatusCmd { return cmd } +func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Quit(_ context.Context) *StatusCmd { panic("not implemented") } @@ -503,6 +524,23 @@ func (c cmdable) ClientInfo(ctx context.Context) *ClientInfoCmd { return cmd } +// ClientMaintNotifications enables or disables maintenance notifications for maintenance upgrades. +// When enabled, the client will receive push notifications about Redis maintenance events. +func (c cmdable) ClientMaintNotifications(ctx context.Context, enabled bool, endpointType string) *StatusCmd { + args := []interface{}{"client", "maint_notifications"} + if enabled { + if endpointType == "" { + endpointType = "none" + } + args = append(args, "on", "moving-endpoint-type", endpointType) + } else { + args = append(args, "off") + } + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + // ------------------------------------------------------------------------------------------------ func (c cmdable) ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd { @@ -639,6 +677,34 @@ func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd { return cmd } +func (c cmdable) SlowLogLen(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "slowlog", "len") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SlowLogReset(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "slowlog", "reset") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Latency(ctx context.Context) *LatencyCmd { + cmd := NewLatencyCmd(ctx, "latency", "latest") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LatencyReset(ctx context.Context, events ...interface{}) *StatusCmd { + args := make([]interface{}, 2+len(events)) + args[0] = "latency" + args[1] = "reset" + copy(args[2:], events) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Sync(_ context.Context) { panic("not implemented") } @@ -659,7 +725,9 @@ func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *I args := []interface{}{"memory", "usage", key} if len(samples) > 0 { if len(samples) != 1 { - panic("MemoryUsage expects single sample count") + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("MemoryUsage expects single sample count")) + return cmd } args = append(args, "SAMPLES", samples[0]) } diff --git a/vendor/github.com/redis/go-redis/v9/docker-compose.yml b/vendor/github.com/redis/go-redis/v9/docker-compose.yml new file mode 100644 index 000000000..5ffedb0a5 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/docker-compose.yml @@ -0,0 +1,106 @@ +--- + +services: + redis: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.4.0} + platform: linux/amd64 + container_name: redis-standalone + environment: + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=6379 + - TLS_PORT=6666 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6379:6379 + - 6666:6666 # TLS port + volumes: + - "./dockers/standalone:/redis/work" + profiles: + - standalone + - sentinel + - all-stack + - all + + osscluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.4.0} + platform: linux/amd64 + container_name: redis-osscluster + environment: + - NODES=6 + - PORT=16600 + command: "--cluster-enabled yes" + ports: + - "16600-16605:16600-16605" + volumes: + - "./dockers/osscluster:/redis/work" + profiles: + - cluster + - all-stack + - all + + sentinel-cluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.4.0} + platform: linux/amd64 + container_name: redis-sentinel-cluster + network_mode: "host" + environment: + - NODES=3 + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=9121 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + #ports: + # - "9121-9123:9121-9123" + volumes: + - "./dockers/sentinel-cluster:/redis/work" + profiles: + - sentinel + - all-stack + - all + + sentinel: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.4.0} + platform: linux/amd64 + container_name: redis-sentinel + depends_on: + - sentinel-cluster + environment: + - NODES=3 + - REDIS_CLUSTER=no + - PORT=26379 + command: ${REDIS_EXTRA_ARGS:---sentinel} + network_mode: "host" + #ports: + # - 26379:26379 + # - 26380:26380 + # - 26381:26381 + 
volumes: + - "./dockers/sentinel.conf:/redis/config-default/redis.conf" + - "./dockers/sentinel:/redis/work" + profiles: + - sentinel + - all-stack + - all + + ring-cluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.4.0} + platform: linux/amd64 + container_name: redis-ring-cluster + environment: + - NODES=3 + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=6390 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6390:6390 + - 6391:6391 + - 6392:6392 + volumes: + - "./dockers/ring:/redis/work" + profiles: + - ring + - cluster + - all-stack + - all diff --git a/vendor/github.com/redis/go-redis/v9/error.go b/vendor/github.com/redis/go-redis/v9/error.go index 9b348193a..12b5604df 100644 --- a/vendor/github.com/redis/go-redis/v9/error.go +++ b/vendor/github.com/redis/go-redis/v9/error.go @@ -15,6 +15,19 @@ import ( // ErrClosed performs any operation on the closed client will return this error. var ErrClosed = pool.ErrClosed +// ErrPoolExhausted is returned from a pool connection method +// when the maximum number of database connections in the pool has been reached. +var ErrPoolExhausted = pool.ErrPoolExhausted + +// ErrPoolTimeout timed out waiting to get a connection from the connection pool. +var ErrPoolTimeout = pool.ErrPoolTimeout + +// ErrCrossSlot is returned when keys are used in the same Redis command and +// the keys are not in the same hash slot. This error is returned by Redis +// Cluster and will be returned by the client when TxPipeline or TxPipelined +// is used on a ClusterClient with keys in different slots. +var ErrCrossSlot = proto.RedisError("CROSSSLOT Keys in request don't hash to the same slot") + // HasErrorPrefix checks if the err is a Redis error and the message contains a prefix. func HasErrorPrefix(err error, prefix string) bool { var rErr Error @@ -38,23 +51,83 @@ type Error interface { var _ Error = proto.RedisError("") +func isContextError(err error) bool { + // Check for wrapped context errors using errors.Is + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} + +// isTimeoutError checks if an error is a timeout error, even if wrapped. +// Returns (isTimeout, shouldRetryOnTimeout) where: +// - isTimeout: true if the error is any kind of timeout error +// - shouldRetryOnTimeout: true if Timeout() method returns true +func isTimeoutError(err error) (isTimeout bool, hasTimeoutFlag bool) { + // Check for timeoutError interface (works with wrapped errors) + var te timeoutError + if errors.As(err, &te) { + return true, te.Timeout() + } + + // Check for net.Error specifically (common case for network timeouts) + var netErr net.Error + if errors.As(err, &netErr) { + return true, netErr.Timeout() + } + + return false, false +} + func shouldRetry(err error, retryTimeout bool) bool { - switch err { - case io.EOF, io.ErrUnexpectedEOF: + if err == nil { + return false + } + + // Check for EOF errors (works with wrapped errors) + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { return true - case nil, context.Canceled, context.DeadlineExceeded: + } + + // Check for context errors (works with wrapped errors) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return false } - if v, ok := err.(timeoutError); ok { - if v.Timeout() { + // Check for pool timeout (works with wrapped errors) + if errors.Is(err, pool.ErrPoolTimeout) { + // connection pool timeout, increase retries. 
#3289 + return true + } + + // Check for timeout errors (works with wrapped errors) + if isTimeout, hasTimeoutFlag := isTimeoutError(err); isTimeout { + if hasTimeoutFlag { return retryTimeout } return true } + // Check for typed Redis errors using errors.As (works with wrapped errors) + if proto.IsMaxClientsError(err) { + return true + } + if proto.IsLoadingError(err) { + return true + } + if proto.IsReadOnlyError(err) { + return true + } + if proto.IsMasterDownError(err) { + return true + } + if proto.IsClusterDownError(err) { + return true + } + if proto.IsTryAgainError(err) { + return true + } + + // Fallback to string checking for backward compatibility with plain errors s := err.Error() - if s == "ERR max number of clients reached" { + if strings.HasPrefix(s, "ERR max number of clients reached") { return true } if strings.HasPrefix(s, "LOADING ") { @@ -69,20 +142,36 @@ func shouldRetry(err error, retryTimeout bool) bool { if strings.HasPrefix(s, "TRYAGAIN ") { return true } + if strings.HasPrefix(s, "MASTERDOWN ") { + return true + } return false } func isRedisError(err error) bool { - _, ok := err.(proto.RedisError) - return ok + // Check if error implements the Error interface (works with wrapped errors) + var redisErr Error + if errors.As(err, &redisErr) { + return true + } + // Also check for proto.RedisError specifically + var protoRedisErr proto.RedisError + return errors.As(err, &protoRedisErr) } func isBadConn(err error, allowTimeout bool, addr string) bool { - switch err { - case nil: + if err == nil { return false - case context.Canceled, context.DeadlineExceeded: + } + + // Check for context errors (works with wrapped errors) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return true + } + + // Check for pool timeout errors (works with wrapped errors) + if errors.Is(err, pool.ErrConnUnusableTimeout) { return true } @@ -103,7 +192,9 @@ func isBadConn(err error, allowTimeout bool, addr string) bool { } if allowTimeout { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + // Check for network timeout errors (works with wrapped errors) + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { return false } } @@ -112,44 +203,143 @@ func isBadConn(err error, allowTimeout bool, addr string) bool { } func isMovedError(err error) (moved bool, ask bool, addr string) { - if !isRedisError(err) { - return + // Check for typed MovedError + if movedErr, ok := proto.IsMovedError(err); ok { + addr = movedErr.Addr() + addr = internal.GetAddr(addr) + return true, false, addr } - s := err.Error() - switch { - case strings.HasPrefix(s, "MOVED "): - moved = true - case strings.HasPrefix(s, "ASK "): - ask = true - default: - return + // Check for typed AskError + if askErr, ok := proto.IsAskError(err); ok { + addr = askErr.Addr() + addr = internal.GetAddr(addr) + return false, true, addr } - ind := strings.LastIndex(s, " ") - if ind == -1 { - return false, false, "" + // Fallback to string checking for backward compatibility + s := err.Error() + if strings.HasPrefix(s, "MOVED ") { + // Parse: MOVED 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + addr = internal.GetAddr(parts[2]) + return true, false, addr + } + } + if strings.HasPrefix(s, "ASK ") { + // Parse: ASK 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + addr = internal.GetAddr(parts[2]) + return false, true, addr + } } - addr = s[ind+1:] - addr = internal.GetAddr(addr) - return + return false, false, "" } func 
isLoadingError(err error) bool { - return strings.HasPrefix(err.Error(), "LOADING ") + return proto.IsLoadingError(err) } func isReadOnlyError(err error) bool { - return strings.HasPrefix(err.Error(), "READONLY ") + return proto.IsReadOnlyError(err) } func isMovedSameConnAddr(err error, addr string) bool { - redisError := err.Error() - if !strings.HasPrefix(redisError, "MOVED ") { - return false + if movedErr, ok := proto.IsMovedError(err); ok { + return strings.HasSuffix(movedErr.Addr(), addr) + } + return false +} + +//------------------------------------------------------------------------------ + +// Typed error checking functions for public use. +// These functions work correctly even when errors are wrapped in hooks. + +// IsLoadingError checks if an error is a Redis LOADING error, even if wrapped. +// LOADING errors occur when Redis is loading the dataset in memory. +func IsLoadingError(err error) bool { + return proto.IsLoadingError(err) +} + +// IsReadOnlyError checks if an error is a Redis READONLY error, even if wrapped. +// READONLY errors occur when trying to write to a read-only replica. +func IsReadOnlyError(err error) bool { + return proto.IsReadOnlyError(err) +} + +// IsClusterDownError checks if an error is a Redis CLUSTERDOWN error, even if wrapped. +// CLUSTERDOWN errors occur when the cluster is down. +func IsClusterDownError(err error) bool { + return proto.IsClusterDownError(err) +} + +// IsTryAgainError checks if an error is a Redis TRYAGAIN error, even if wrapped. +// TRYAGAIN errors occur when a command cannot be processed and should be retried. +func IsTryAgainError(err error) bool { + return proto.IsTryAgainError(err) +} + +// IsMasterDownError checks if an error is a Redis MASTERDOWN error, even if wrapped. +// MASTERDOWN errors occur when the master is down. +func IsMasterDownError(err error) bool { + return proto.IsMasterDownError(err) +} + +// IsMaxClientsError checks if an error is a Redis max clients error, even if wrapped. +// This error occurs when the maximum number of clients has been reached. +func IsMaxClientsError(err error) bool { + return proto.IsMaxClientsError(err) +} + +// IsMovedError checks if an error is a Redis MOVED error, even if wrapped. +// MOVED errors occur in cluster mode when a key has been moved to a different node. +// Returns the address of the node where the key has been moved and a boolean indicating if it's a MOVED error. +func IsMovedError(err error) (addr string, ok bool) { + if movedErr, isMovedErr := proto.IsMovedError(err); isMovedErr { + return movedErr.Addr(), true } - return strings.HasSuffix(redisError, " "+addr) + return "", false +} + +// IsAskError checks if an error is a Redis ASK error, even if wrapped. +// ASK errors occur in cluster mode when a key is being migrated and the client should ask another node. +// Returns the address of the node to ask and a boolean indicating if it's an ASK error. +func IsAskError(err error) (addr string, ok bool) { + if askErr, isAskErr := proto.IsAskError(err); isAskErr { + return askErr.Addr(), true + } + return "", false +} + +// IsAuthError checks if an error is a Redis authentication error, even if wrapped. 
+// Authentication errors occur when: +// - NOAUTH: Redis requires authentication but none was provided +// - WRONGPASS: Redis authentication failed due to incorrect password +// - unauthenticated: Error returned when password changed +func IsAuthError(err error) bool { + return proto.IsAuthError(err) +} + +// IsPermissionError checks if an error is a Redis permission error, even if wrapped. +// Permission errors (NOPERM) occur when a user does not have permission to execute a command. +func IsPermissionError(err error) bool { + return proto.IsPermissionError(err) +} + +// IsExecAbortError checks if an error is a Redis EXECABORT error, even if wrapped. +// EXECABORT errors occur when a transaction is aborted. +func IsExecAbortError(err error) bool { + return proto.IsExecAbortError(err) +} + +// IsOOMError checks if an error is a Redis OOM (Out Of Memory) error, even if wrapped. +// OOM errors occur when Redis is out of memory. +func IsOOMError(err error) bool { + return proto.IsOOMError(err) } //------------------------------------------------------------------------------ diff --git a/vendor/github.com/redis/go-redis/v9/gears_commands.go b/vendor/github.com/redis/go-redis/v9/gears_commands.go deleted file mode 100644 index e0d49a6b7..000000000 --- a/vendor/github.com/redis/go-redis/v9/gears_commands.go +++ /dev/null @@ -1,149 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "strings" -) - -type GearsCmdable interface { - TFunctionLoad(ctx context.Context, lib string) *StatusCmd - TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd - TFunctionDelete(ctx context.Context, libName string) *StatusCmd - TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd - TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd - TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd - TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd -} - -type TFunctionLoadOptions struct { - Replace bool - Config string -} - -type TFunctionListOptions struct { - Withcode bool - Verbose int - Library string -} - -type TFCallOptions struct { - Keys []string - Arguments []string -} - -// TFunctionLoad - load a new JavaScript library into Redis. -// For more information - https://redis.io/commands/tfunction-load/ -func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD", lib} - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD"} - if options != nil { - if options.Replace { - args = append(args, "REPLACE") - } - if options.Config != "" { - args = append(args, "CONFIG", options.Config) - } - } - args = append(args, lib) - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFunctionDelete - delete a JavaScript library from Redis. -// For more information - https://redis.io/commands/tfunction-delete/ -func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd { - args := []interface{}{"TFUNCTION", "DELETE", libName} - cmd := NewStatusCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -// TFunctionList - list the functions with additional information about each function. -// For more information - https://redis.io/commands/tfunction-list/ -func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - if options != nil { - if options.Withcode { - args = append(args, "WITHCODE") - } - if options.Verbose != 0 { - v := strings.Repeat("v", options.Verbose) - args = append(args, v) - } - if options.Library != "" { - args = append(args, "LIBRARY", options.Library) - } - } - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCall - invoke a function. -// For more information - https://redis.io/commands/tfcall/ -func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine). -// For more information - https://redis.io/commands/TFCallASYNC/ -func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} diff --git a/vendor/github.com/redis/go-redis/v9/generic_commands.go b/vendor/github.com/redis/go-redis/v9/generic_commands.go index dc6c3fe01..c7100222c 100644 --- a/vendor/github.com/redis/go-redis/v9/generic_commands.go +++ b/vendor/github.com/redis/go-redis/v9/generic_commands.go @@ -3,6 +3,8 @@ package redis import ( "context" "time" + + "github.com/redis/go-redis/v9/internal/hashtag" ) type GenericCmdable interface { @@ -363,6 +365,9 @@ func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count in args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(3) + } _ = c(ctx, cmd) return cmd } @@ -379,6 +384,9 @@ func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, coun args = append(args, "type", keyType) } cmd := NewScanCmd(ctx, c, args...) 
+ if hashtag.Present(match) { + cmd.SetFirstKeyPos(3) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go index dcffdcdd9..b78860a5a 100644 --- a/vendor/github.com/redis/go-redis/v9/hash_commands.go +++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go @@ -3,6 +3,8 @@ package redis import ( "context" "time" + + "github.com/redis/go-redis/v9/internal/hashtag" ) type HashCmdable interface { @@ -10,6 +12,9 @@ type HashCmdable interface { HExists(ctx context.Context, key, field string) *BoolCmd HGet(ctx context.Context, key, field string) *StringCmd HGetAll(ctx context.Context, key string) *MapStringStringCmd + HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd HKeys(ctx context.Context, key string) *StringSliceCmd @@ -17,12 +22,15 @@ type HashCmdable interface { HMGet(ctx context.Context, key string, fields ...string) *SliceCmd HSet(ctx context.Context, key string, values ...interface{}) *IntCmd HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd + HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HVals(ctx context.Context, key string) *StringSliceCmd HRandField(ctx context.Context, key string, count int) *StringSliceCmd HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + HStrLen(ctx context.Context, key, field string) *IntCmd HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd @@ -108,16 +116,16 @@ func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *Slice // HSet accepts values in following formats: // -// - HSet("myhash", "key1", "value1", "key2", "value2") +// - HSet(ctx, "myhash", "key1", "value1", "key2", "value2") // -// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// - HSet(ctx, "myhash", []string{"key1", "value1", "key2", "value2"}) // -// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) // // Playing struct With "redis" tag. // type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` } // -// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 +// - HSet(ctx, "myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 // // For struct, can be a structure pointer type, we only parse the field whose tag is redis. 
// if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it, @@ -186,10 +194,18 @@ func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } +func (c cmdable) HStrLen(ctx context.Context, key, field string) *IntCmd { + cmd := NewIntCmd(ctx, "hstrlen", key, field) + _ = c(ctx, cmd) + return cmd +} func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { args := []interface{}{"hscan", key, cursor} if match != "" { @@ -200,6 +216,9 @@ func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, m } args = append(args, "novalues") cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } @@ -213,7 +232,10 @@ type HExpireArgs struct { // HExpire - Sets the expiration time for specified fields in a hash in seconds. // The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. -// For more information - https://redis.io/commands/hexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. +// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)} @@ -225,10 +247,13 @@ func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Durati return cmd } -// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// HExpireWithArgs - Sets the expiration time for specified fields in a hash in seconds. // It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. // The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. -// For more information - https://redis.io/commands/hexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. +// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)} @@ -257,7 +282,10 @@ func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration tim // HPExpire - Sets the expiration time for specified fields in a hash in milliseconds. // Similar to HExpire, it accepts a key, an expiration duration in milliseconds, a struct with expiration condition flags, and a list of fields. // The command modifies the standard time.Duration to milliseconds for the Redis command. -// For more information - https://redis.io/commands/hpexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HPEXPIRE Documentation]. 
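// Illustrative sketch (editorial aside): using the per-field hash TTL commands
// documented above and below (HEXPIRE/HTTL, available since Redis 7.4 CE).
// Key and field names are assumptions; each returned int64 is a per-field
// status code as described in the HEXPIRE docs.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.HSet(ctx, "session:42", "token", "abc", "refresh", "def")

	// Expire only the "token" field after 10 minutes; "refresh" keeps living.
	res, err := rdb.HExpire(ctx, "session:42", 10*time.Minute, "token").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(res) // e.g. [1] on success

	// Remaining per-field TTLs in seconds.
	ttls, _ := rdb.HTTL(ctx, "session:42", "token", "refresh").Result()
	fmt.Println(ttls)
}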
+//
+// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/
 func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)}
@@ -269,6 +297,13 @@ func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Durat
 	return cmd
 }
 
+// HPExpireWithArgs - Sets the expiration time for specified fields in a hash in milliseconds.
+// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields.
+// The command constructs an argument list starting with "HPEXPIRE", followed by the key, duration, any conditional flags, and the specified fields.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPEXPIRE Documentation].
+//
+// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/
 func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)}
@@ -297,7 +332,10 @@ func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration ti
 // HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds.
 // Takes a key, a UNIX timestamp, a struct of conditional flags, and a list of fields.
 // The command sets absolute expiration times based on the UNIX timestamp provided.
-// For more information - https://redis.io/commands/hexpireat/
+// Available since Redis 7.4 CE.
+// For more information refer to [HExpireAt Documentation].
+//
+// [HExpireAt Documentation]: https://redis.io/commands/hexpireat/
 func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)}
@@ -337,7 +375,10 @@ func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time
 // HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds.
 // Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds.
-// For more information - https://redis.io/commands/hpexpireat/
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireAt Documentation].
+//
+// [HPExpireAt Documentation]: https://redis.io/commands/hpexpireat/
 func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)}
@@ -377,7 +418,10 @@ func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Tim
 // HPersist - Removes the expiration time from specified fields in a hash.
 // Accepts a key and the fields themselves.
 // This command ensures that each field specified will have its expiration removed if present.
-// For more information - https://redis.io/commands/hpersist/
+// Available since Redis 7.4 CE.
+// For more information refer to [HPersist Documentation].
+//
+// [HPersist Documentation]: https://redis.io/commands/hpersist/
 func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)}
@@ -392,6 +436,10 @@ func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *In
 // HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds.
 // Requires a key and the fields themselves to fetch their expiration timestamps.
 // This command returns the expiration times for each field or error/status codes for each field as specified.
+// Available since Redis 7.4 CE.
+// For more information refer to [HExpireTime Documentation].
+//
+// [HExpireTime Documentation]: https://redis.io/commands/hexpiretime/
 // For more information - https://redis.io/commands/hexpiretime/
 func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)}
@@ -407,6 +455,10 @@
 // HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds.
 // Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters.
 // Provides the expiration timestamp for each field in milliseconds.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireTime Documentation].
+//
+// [HPExpireTime Documentation]: https://redis.io/commands/hpexpiretime/
 // For more information - https://redis.io/commands/hexpiretime/
 func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)}
@@ -422,7 +474,10 @@ func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string)
 // HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds.
 // Requires a key and the fields themselves. It returns the TTL for each specified field.
 // This command fetches the TTL in seconds for each field or returns error/status codes as appropriate.
-// For more information - https://redis.io/commands/httl/
+// Available since Redis 7.4 CE.
+// For more information refer to [HTTL Documentation].
+//
+// [HTTL Documentation]: https://redis.io/commands/httl/
 func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HTTL", key, "FIELDS", len(fields)}
@@ -437,6 +492,10 @@ func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSli
 // HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds.
 // Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields.
 // This command provides the TTL in milliseconds for each field or returns error/status codes as needed.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPTTL Documentation].
+// +// [HPTTL Documentation]: https://redis.io/commands/hpttl/ // For more information - https://redis.io/commands/hpttl/ func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { args := []interface{}{"HPTTL", key, "FIELDS", len(fields)} @@ -448,3 +507,113 @@ func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSl _ = c(ctx, cmd) return cmd } + +func (c cmdable) HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETDEL", key, "FIELDS", len(fields)} + for _, field := range fields { + args = append(args, field) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETEX", key, "FIELDS", len(fields)} + for _, field := range fields { + args = append(args, field) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HGetEXExpirationType represents an expiration option for the HGETEX command. +type HGetEXExpirationType string + +const ( + HGetEXExpirationEX HGetEXExpirationType = "EX" + HGetEXExpirationPX HGetEXExpirationType = "PX" + HGetEXExpirationEXAT HGetEXExpirationType = "EXAT" + HGetEXExpirationPXAT HGetEXExpirationType = "PXAT" + HGetEXExpirationPERSIST HGetEXExpirationType = "PERSIST" +) + +type HGetEXOptions struct { + ExpirationType HGetEXExpirationType + ExpirationVal int64 +} + +func (c cmdable) HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETEX", key} + if options.ExpirationType != "" { + args = append(args, string(options.ExpirationType)) + if options.ExpirationType != HGetEXExpirationPERSIST { + args = append(args, options.ExpirationVal) + } + } + + args = append(args, "FIELDS", len(fields)) + for _, field := range fields { + args = append(args, field) + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type HSetEXCondition string + +const ( + HSetEXFNX HSetEXCondition = "FNX" // Only set the fields if none of them already exist. + HSetEXFXX HSetEXCondition = "FXX" // Only set the fields if all already exist. +) + +type HSetEXExpirationType string + +const ( + HSetEXExpirationEX HSetEXExpirationType = "EX" + HSetEXExpirationPX HSetEXExpirationType = "PX" + HSetEXExpirationEXAT HSetEXExpirationType = "EXAT" + HSetEXExpirationPXAT HSetEXExpirationType = "PXAT" + HSetEXExpirationKEEPTTL HSetEXExpirationType = "KEEPTTL" +) + +type HSetEXOptions struct { + Condition HSetEXCondition + ExpirationType HSetEXExpirationType + ExpirationVal int64 +} + +func (c cmdable) HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key, "FIELDS", len(fieldsAndValues) / 2} + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) 
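// Illustrative sketch (editorial aside): the new read/write-with-TTL hash
// commands added above. HGetEXWithArgs can set or clear a TTL while reading,
// HSetEXWithArgs writes fields with a TTL and an optional FNX/FXX condition,
// and HGetDel reads and deletes atomically. Server-side availability of
// HGETDEL/HGETEX/HSETEX depends on the Redis version; key and field names
// are assumptions.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Write two fields only if none of them exist yet, with a 60 second TTL.
	n, err := rdb.HSetEXWithArgs(ctx, "session:42",
		&redis.HSetEXOptions{
			Condition:      redis.HSetEXFNX,
			ExpirationType: redis.HSetEXExpirationEX,
			ExpirationVal:  60,
		},
		"token", "abc", "refresh", "def").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 1 if the fields were set

	// Read "token" and clear its TTL in one round trip.
	vals, _ := rdb.HGetEXWithArgs(ctx, "session:42",
		&redis.HGetEXOptions{ExpirationType: redis.HGetEXExpirationPERSIST},
		"token").Result()
	fmt.Println(vals)

	// Read and delete "refresh" atomically.
	gone, _ := rdb.HGetDel(ctx, "session:42", "refresh").Result()
	fmt.Println(gone)
}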
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key} + if options.Condition != "" { + args = append(args, string(options.Condition)) + } + if options.ExpirationType != "" { + args = append(args, string(options.ExpirationType)) + if options.ExpirationType != HSetEXExpirationKEEPTTL { + args = append(args, options.ExpirationVal) + } + } + args = append(args, "FIELDS", len(fieldsAndValues)/2) + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go new file mode 100644 index 000000000..22bfedf71 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go @@ -0,0 +1,100 @@ +package streaming + +import ( + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/internal/pool" +) + +// ConnReAuthCredentialsListener is a credentials listener for a specific connection +// that triggers re-authentication when credentials change. +// +// This listener implements the auth.CredentialsListener interface and is subscribed +// to a StreamingCredentialsProvider. When new credentials are received via OnNext, +// it marks the connection for re-authentication through the manager. +// +// The re-authentication is always performed asynchronously to avoid blocking the +// credentials provider and to prevent potential deadlocks with the pool semaphore. +// The actual re-auth happens when the connection is returned to the pool in an idle state. +// +// Lifecycle: +// - Created during connection initialization via Manager.Listener() +// - Subscribed to the StreamingCredentialsProvider +// - Receives credential updates via OnNext() +// - Cleaned up when connection is removed from pool via Manager.RemoveListener() +type ConnReAuthCredentialsListener struct { + // reAuth is the function to re-authenticate the connection with new credentials + reAuth func(conn *pool.Conn, credentials auth.Credentials) error + + // onErr is the function to call when re-authentication or acquisition fails + onErr func(conn *pool.Conn, err error) + + // conn is the connection this listener is associated with + conn *pool.Conn + + // manager is the streaming credentials manager for coordinating re-auth + manager *Manager +} + +// OnNext is called when new credentials are received from the StreamingCredentialsProvider. +// +// This method marks the connection for asynchronous re-authentication. The actual +// re-authentication happens in the background when the connection is returned to the +// pool and is in an idle state. +// +// Asynchronous re-auth is used to: +// - Avoid blocking the credentials provider's notification goroutine +// - Prevent deadlocks with the pool's semaphore (especially with small pool sizes) +// - Ensure re-auth happens when the connection is safe to use (not processing commands) +// +// The reAuthFn callback receives: +// - nil if the connection was successfully acquired for re-auth +// - error if acquisition timed out or failed +// +// Thread-safe: Called by the credentials provider's notification goroutine. 
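From the user's side, this listener is wired up by handing the client a streaming credentials provider; each pooled connection then subscribes a listener like the one below. A minimal sketch, assuming the public Options field StreamingCredentialsProvider exposed by recent go-redis releases and a provider supplied by some identity library:

package main

import (
	"github.com/redis/go-redis/v9"
	"github.com/redis/go-redis/v9/auth"
)

// myProvider is assumed to come from an identity/secret-rotation library that
// implements auth.StreamingCredentialsProvider (initial credentials plus a
// stream of updates pushed to subscribed listeners).
var myProvider auth.StreamingCredentialsProvider

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:                         "localhost:6379",
		StreamingCredentialsProvider: myProvider, // assumption: field name per recent go-redis Options
	})
	defer rdb.Close()
	// When the provider emits new credentials, each connection's listener
	// (ConnReAuthCredentialsListener below) schedules a background re-auth.
}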
+func (c *ConnReAuthCredentialsListener) OnNext(credentials auth.Credentials) { + if c.conn == nil || c.conn.IsClosed() || c.manager == nil || c.reAuth == nil { + return + } + + // Always use async reauth to avoid complex pool semaphore issues + // The synchronous path can cause deadlocks in the pool's semaphore mechanism + // when called from the Subscribe goroutine, especially with small pool sizes. + // The connection pool hook will re-authenticate the connection when it is + // returned to the pool in a clean, idle state. + c.manager.MarkForReAuth(c.conn, func(err error) { + // err is from connection acquisition (timeout, etc.) + if err != nil { + // Log the error + c.OnError(err) + return + } + // err is from reauth command execution + err = c.reAuth(c.conn, credentials) + if err != nil { + // Log the error + c.OnError(err) + return + } + }) +} + +// OnError is called when an error occurs during credential streaming or re-authentication. +// +// This method can be called from: +// - The StreamingCredentialsProvider when there's an error in the credentials stream +// - The re-auth process when connection acquisition times out +// - The re-auth process when the AUTH command fails +// +// The error is delegated to the onErr callback provided during listener creation. +// +// Thread-safe: Can be called from multiple goroutines (provider, re-auth worker). +func (c *ConnReAuthCredentialsListener) OnError(err error) { + if c.onErr == nil { + return + } + + c.onErr(c.conn, err) +} + +// Ensure ConnReAuthCredentialsListener implements the CredentialsListener interface. +var _ auth.CredentialsListener = (*ConnReAuthCredentialsListener)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go new file mode 100644 index 000000000..66e6eafdc --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go @@ -0,0 +1,77 @@ +package streaming + +import ( + "sync" + + "github.com/redis/go-redis/v9/auth" +) + +// CredentialsListeners is a thread-safe collection of credentials listeners +// indexed by connection ID. +// +// This collection is used by the Manager to maintain a registry of listeners +// for each connection in the pool. Listeners are reused when connections are +// reinitialized (e.g., after a handoff) to avoid creating duplicate subscriptions +// to the StreamingCredentialsProvider. +// +// The collection supports concurrent access from multiple goroutines during +// connection initialization, credential updates, and connection removal. +type CredentialsListeners struct { + // listeners maps connection ID to credentials listener + listeners map[uint64]auth.CredentialsListener + + // lock protects concurrent access to the listeners map + lock sync.RWMutex +} + +// NewCredentialsListeners creates a new thread-safe credentials listeners collection. +func NewCredentialsListeners() *CredentialsListeners { + return &CredentialsListeners{ + listeners: make(map[uint64]auth.CredentialsListener), + } +} + +// Add adds or updates a credentials listener for a connection. +// +// If a listener already exists for the connection ID, it is replaced. +// This is safe because the old listener should have been unsubscribed +// before the connection was reinitialized. +// +// Thread-safe: Can be called concurrently from multiple goroutines. 
+func (c *CredentialsListeners) Add(connID uint64, listener auth.CredentialsListener) { + c.lock.Lock() + defer c.lock.Unlock() + if c.listeners == nil { + c.listeners = make(map[uint64]auth.CredentialsListener) + } + c.listeners[connID] = listener +} + +// Get retrieves the credentials listener for a connection. +// +// Returns: +// - listener: The credentials listener for the connection, or nil if not found +// - ok: true if a listener exists for the connection ID, false otherwise +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (c *CredentialsListeners) Get(connID uint64) (auth.CredentialsListener, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if len(c.listeners) == 0 { + return nil, false + } + listener, ok := c.listeners[connID] + return listener, ok +} + +// Remove removes the credentials listener for a connection. +// +// This is called when a connection is removed from the pool to prevent +// memory leaks. If no listener exists for the connection ID, this is a no-op. +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (c *CredentialsListeners) Remove(connID uint64) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.listeners, connID) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go new file mode 100644 index 000000000..f785927ee --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go @@ -0,0 +1,137 @@ +package streaming + +import ( + "errors" + "time" + + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/internal/pool" +) + +// Manager coordinates streaming credentials and re-authentication for a connection pool. +// +// The manager is responsible for: +// - Creating and managing per-connection credentials listeners +// - Providing the pool hook for re-authentication +// - Coordinating between credentials updates and pool operations +// +// When credentials change via a StreamingCredentialsProvider: +// 1. The credentials listener (ConnReAuthCredentialsListener) receives the update +// 2. It calls MarkForReAuth on the manager +// 3. The manager delegates to the pool hook +// 4. The pool hook schedules background re-authentication +// +// The manager maintains a registry of credentials listeners indexed by connection ID, +// allowing listener reuse when connections are reinitialized (e.g., after handoff). +type Manager struct { + // credentialsListeners maps connection ID to credentials listener + credentialsListeners *CredentialsListeners + + // pool is the connection pool being managed + pool pool.Pooler + + // poolHookRef is the re-authentication pool hook + poolHookRef *ReAuthPoolHook +} + +// NewManager creates a new streaming credentials manager. +// +// Parameters: +// - pl: The connection pool to manage +// - reAuthTimeout: Maximum time to wait for acquiring a connection for re-authentication +// +// The manager creates a ReAuthPoolHook sized to match the pool size, ensuring that +// re-auth operations don't exhaust the connection pool. +func NewManager(pl pool.Pooler, reAuthTimeout time.Duration) *Manager { + m := &Manager{ + pool: pl, + poolHookRef: NewReAuthPoolHook(pl.Size(), reAuthTimeout), + credentialsListeners: NewCredentialsListeners(), + } + m.poolHookRef.manager = m + return m +} + +// PoolHook returns the pool hook for re-authentication. 
+// +// This hook should be registered with the connection pool to enable +// automatic re-authentication when credentials change. +func (m *Manager) PoolHook() pool.PoolHook { + return m.poolHookRef +} + +// Listener returns or creates a credentials listener for a connection. +// +// This method is called during connection initialization to set up the +// credentials listener. If a listener already exists for the connection ID +// (e.g., after a handoff), it is reused. +// +// Parameters: +// - poolCn: The connection to create/get a listener for +// - reAuth: Function to re-authenticate the connection with new credentials +// - onErr: Function to call when re-authentication fails +// +// Returns: +// - auth.CredentialsListener: The listener to subscribe to the credentials provider +// - error: Non-nil if poolCn is nil +// +// Note: The reAuth and onErr callbacks are captured once when the listener is +// created and reused for the connection's lifetime. They should not change. +// +// Thread-safe: Can be called concurrently during connection initialization. +func (m *Manager) Listener( + poolCn *pool.Conn, + reAuth func(*pool.Conn, auth.Credentials) error, + onErr func(*pool.Conn, error), +) (auth.CredentialsListener, error) { + if poolCn == nil { + return nil, errors.New("poolCn cannot be nil") + } + connID := poolCn.GetID() + // if we reconnect the underlying network connection, the streaming credentials listener will continue to work + // so we can get the old listener from the cache and use it. + // subscribing the same (an already subscribed) listener for a StreamingCredentialsProvider SHOULD be a no-op + listener, ok := m.credentialsListeners.Get(connID) + if !ok || listener == nil { + // Create new listener for this connection + // Note: Callbacks (reAuth, onErr) are captured once and reused for the connection's lifetime + newCredListener := &ConnReAuthCredentialsListener{ + conn: poolCn, + reAuth: reAuth, + onErr: onErr, + manager: m, + } + + m.credentialsListeners.Add(connID, newCredListener) + listener = newCredListener + } + return listener, nil +} + +// MarkForReAuth marks a connection for re-authentication. +// +// This method is called by the credentials listener when new credentials are +// received. It delegates to the pool hook to schedule background re-authentication. +// +// Parameters: +// - poolCn: The connection to re-authenticate +// - reAuthFn: Function to call for re-authentication, receives error if acquisition fails +// +// Thread-safe: Called by credentials listeners when credentials change. +func (m *Manager) MarkForReAuth(poolCn *pool.Conn, reAuthFn func(error)) { + connID := poolCn.GetID() + m.poolHookRef.MarkForReAuth(connID, reAuthFn) +} + +// RemoveListener removes the credentials listener for a connection. +// +// This method is called by the pool hook's OnRemove to clean up listeners +// when connections are removed from the pool. +// +// Parameters: +// - connID: The connection ID whose listener should be removed +// +// Thread-safe: Called during connection removal. 
+func (m *Manager) RemoveListener(connID uint64) { + m.credentialsListeners.Remove(connID) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go new file mode 100644 index 000000000..aaf4f6099 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go @@ -0,0 +1,241 @@ +package streaming + +import ( + "context" + "sync" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/pool" +) + +// ReAuthPoolHook is a pool hook that manages background re-authentication of connections +// when credentials change via a streaming credentials provider. +// +// The hook uses a semaphore-based worker pool to limit concurrent re-authentication +// operations and prevent pool exhaustion. When credentials change, connections are +// marked for re-authentication and processed asynchronously in the background. +// +// The re-authentication process: +// 1. OnPut: When a connection is returned to the pool, check if it needs re-auth +// 2. If yes, schedule it for background processing (move from shouldReAuth to scheduledReAuth) +// 3. A worker goroutine acquires the connection (waits until it's not in use) +// 4. Executes the re-auth function while holding the connection +// 5. Releases the connection back to the pool +// +// The hook ensures that: +// - Only one re-auth operation runs per connection at a time +// - Connections are not used for commands during re-authentication +// - Re-auth operations timeout if they can't acquire the connection +// - Resources are properly cleaned up on connection removal +type ReAuthPoolHook struct { + // shouldReAuth maps connection ID to re-auth function + // Connections in this map need re-authentication but haven't been scheduled yet + shouldReAuth map[uint64]func(error) + shouldReAuthLock sync.RWMutex + + // workers is a semaphore limiting concurrent re-auth operations + // Initialized with poolSize tokens to prevent pool exhaustion + // Uses FastSemaphore for better performance with eventual fairness + workers *internal.FastSemaphore + + // reAuthTimeout is the maximum time to wait for acquiring a connection for re-auth + reAuthTimeout time.Duration + + // scheduledReAuth maps connection ID to scheduled status + // Connections in this map have a background worker attempting re-authentication + scheduledReAuth map[uint64]bool + scheduledLock sync.RWMutex + + // manager is a back-reference for cleanup operations + manager *Manager +} + +// NewReAuthPoolHook creates a new re-authentication pool hook. +// +// Parameters: +// - poolSize: Maximum number of concurrent re-auth operations (typically matches pool size) +// - reAuthTimeout: Maximum time to wait for acquiring a connection for re-authentication +// +// The poolSize parameter is used to initialize the worker semaphore, ensuring that +// re-auth operations don't exhaust the connection pool. +func NewReAuthPoolHook(poolSize int, reAuthTimeout time.Duration) *ReAuthPoolHook { + return &ReAuthPoolHook{ + shouldReAuth: make(map[uint64]func(error)), + scheduledReAuth: make(map[uint64]bool), + workers: internal.NewFastSemaphore(int32(poolSize)), + reAuthTimeout: reAuthTimeout, + } +} + +// MarkForReAuth marks a connection for re-authentication. +// +// This method is called when credentials change and a connection needs to be +// re-authenticated. 
The actual re-authentication happens asynchronously when +// the connection is returned to the pool (in OnPut). +// +// Parameters: +// - connID: The connection ID to mark for re-authentication +// - reAuthFn: Function to call for re-authentication, receives error if acquisition fails +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (r *ReAuthPoolHook) MarkForReAuth(connID uint64, reAuthFn func(error)) { + r.shouldReAuthLock.Lock() + defer r.shouldReAuthLock.Unlock() + r.shouldReAuth[connID] = reAuthFn +} + +// OnGet is called when a connection is retrieved from the pool. +// +// This hook checks if the connection needs re-authentication or has a scheduled +// re-auth operation. If so, it rejects the connection (returns accept=false), +// causing the pool to try another connection. +// +// Returns: +// - accept: false if connection needs re-auth, true otherwise +// - err: always nil (errors are not used in this hook) +// +// Thread-safe: Called concurrently by multiple goroutines getting connections. +func (r *ReAuthPoolHook) OnGet(_ context.Context, conn *pool.Conn, _ bool) (accept bool, err error) { + connID := conn.GetID() + r.shouldReAuthLock.RLock() + _, shouldReAuth := r.shouldReAuth[connID] + r.shouldReAuthLock.RUnlock() + // This connection was marked for reauth while in the pool, + // reject the connection + if shouldReAuth { + // simply reject the connection, it will be re-authenticated in OnPut + return false, nil + } + r.scheduledLock.RLock() + _, hasScheduled := r.scheduledReAuth[connID] + r.scheduledLock.RUnlock() + // has scheduled reauth, reject the connection + if hasScheduled { + // simply reject the connection, it currently has a reauth scheduled + // and the worker is waiting for slot to execute the reauth + return false, nil + } + return true, nil +} + +// OnPut is called when a connection is returned to the pool. +// +// This hook checks if the connection needs re-authentication. If so, it schedules +// a background goroutine to perform the re-auth asynchronously. The goroutine: +// 1. Waits for a worker slot (semaphore) +// 2. Acquires the connection (waits until not in use) +// 3. Executes the re-auth function +// 4. Releases the connection and worker slot +// +// The connection is always pooled (not removed) since re-auth happens in background. +// +// Returns: +// - shouldPool: always true (connection stays in pool during background re-auth) +// - shouldRemove: always false +// - err: always nil +// +// Thread-safe: Called concurrently by multiple goroutines returning connections. 
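The OnPut path described below bounds concurrent re-auth work with a pool-sized semaphore, so background AUTHs can never starve the pool. A standalone sketch of that pattern, with the internal FastSemaphore approximated by a buffered channel:

package main

import (
	"fmt"
	"sync"
	"time"
)

type semaphore chan struct{}

func (s semaphore) acquire() { s <- struct{}{} } // blocks while all slots are busy
func (s semaphore) release() { <-s }

func main() {
	workers := make(semaphore, 3) // one slot per pooled connection, e.g. pool size 3
	var wg sync.WaitGroup
	for id := 1; id <= 8; id++ {
		wg.Add(1)
		go func(connID int) {
			defer wg.Done()
			workers.acquire()
			defer workers.release()
			// Stand-in for: await IDLE state, run AUTH, return conn to IDLE.
			fmt.Println("re-authenticating conn", connID)
			time.Sleep(50 * time.Millisecond)
		}(id)
	}
	wg.Wait()
}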
+func (r *ReAuthPoolHook) OnPut(_ context.Context, conn *pool.Conn) (bool, bool, error) { + if conn == nil { + // noop + return true, false, nil + } + connID := conn.GetID() + // Check if reauth is needed and get the function with proper locking + r.shouldReAuthLock.RLock() + reAuthFn, ok := r.shouldReAuth[connID] + r.shouldReAuthLock.RUnlock() + + if ok { + // Acquire both locks to atomically move from shouldReAuth to scheduledReAuth + // This prevents race conditions where OnGet might miss the transition + r.shouldReAuthLock.Lock() + r.scheduledLock.Lock() + r.scheduledReAuth[connID] = true + delete(r.shouldReAuth, connID) + r.scheduledLock.Unlock() + r.shouldReAuthLock.Unlock() + go func() { + r.workers.AcquireBlocking() + // safety first + if conn == nil || (conn != nil && conn.IsClosed()) { + r.workers.Release() + return + } + defer func() { + if rec := recover(); rec != nil { + // once again - safety first + internal.Logger.Printf(context.Background(), "panic in reauth worker: %v", rec) + } + r.scheduledLock.Lock() + delete(r.scheduledReAuth, connID) + r.scheduledLock.Unlock() + r.workers.Release() + }() + + // Create timeout context for connection acquisition + // This prevents indefinite waiting if the connection is stuck + ctx, cancel := context.WithTimeout(context.Background(), r.reAuthTimeout) + defer cancel() + + // Try to acquire the connection for re-authentication + // We need to ensure the connection is IDLE (not IN_USE) before transitioning to UNUSABLE + // This prevents re-authentication from interfering with active commands + // Use AwaitAndTransition to wait for the connection to become IDLE + stateMachine := conn.GetStateMachine() + if stateMachine == nil { + // No state machine - should not happen, but handle gracefully + reAuthFn(pool.ErrConnUnusableTimeout) + return + } + + // Use predefined slice to avoid allocation + _, err := stateMachine.AwaitAndTransition(ctx, pool.ValidFromIdle(), pool.StateUnusable) + if err != nil { + // Timeout or other error occurred, cannot acquire connection + reAuthFn(err) + return + } + + // safety first + if !conn.IsClosed() { + // Successfully acquired the connection, perform reauth + reAuthFn(nil) + } + + // Release the connection: transition from UNUSABLE back to IDLE + stateMachine.Transition(pool.StateIdle) + }() + } + + // the reauth will happen in background, as far as the pool is concerned: + // pool the connection, don't remove it, no error + return true, false, nil +} + +// OnRemove is called when a connection is removed from the pool. +// +// This hook cleans up all state associated with the connection: +// - Removes from shouldReAuth map (pending re-auth) +// - Removes from scheduledReAuth map (active re-auth) +// - Removes credentials listener from manager +// +// This prevents memory leaks and ensures that removed connections don't have +// lingering re-auth operations or listeners. +// +// Thread-safe: Called when connections are removed due to errors, timeouts, or pool closure. 
+func (r *ReAuthPoolHook) OnRemove(_ context.Context, conn *pool.Conn, _ error) { + connID := conn.GetID() + r.shouldReAuthLock.Lock() + r.scheduledLock.Lock() + delete(r.scheduledReAuth, connID) + delete(r.shouldReAuth, connID) + r.scheduledLock.Unlock() + r.shouldReAuthLock.Unlock() + if r.manager != nil { + r.manager.RemoveListener(connID) + } +} + +var _ pool.PoolHook = (*ReAuthPoolHook)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go index f13ee816d..ea56fd6c7 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go +++ b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go @@ -56,6 +56,18 @@ func Key(key string) string { return key } +func Present(key string) bool { + if key == "" { + return false + } + if s := strings.IndexByte(key, '{'); s > -1 { + if e := strings.IndexByte(key[s+1:], '}'); e > 0 { + return true + } + } + return false +} + func RandomSlot() int { return rand.Intn(slotNumber) } diff --git a/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go b/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go new file mode 100644 index 000000000..17e2a1850 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go @@ -0,0 +1,54 @@ +// Package interfaces provides shared interfaces used by both the main redis package +// and the maintnotifications upgrade package to avoid circular dependencies. +package interfaces + +import ( + "context" + "net" + "time" +) + +// NotificationProcessor is (most probably) a push.NotificationProcessor +// forward declaration to avoid circular imports +type NotificationProcessor interface { + RegisterHandler(pushNotificationName string, handler interface{}, protected bool) error + UnregisterHandler(pushNotificationName string) error + GetHandler(pushNotificationName string) interface{} +} + +// ClientInterface defines the interface that clients must implement for maintnotifications upgrades. +type ClientInterface interface { + // GetOptions returns the client options. + GetOptions() OptionsInterface + + // GetPushProcessor returns the client's push notification processor. + GetPushProcessor() NotificationProcessor +} + +// OptionsInterface defines the interface for client options. +// Uses an adapter pattern to avoid circular dependencies. +type OptionsInterface interface { + // GetReadTimeout returns the read timeout. + GetReadTimeout() time.Duration + + // GetWriteTimeout returns the write timeout. + GetWriteTimeout() time.Duration + + // GetNetwork returns the network type. + GetNetwork() string + + // GetAddr returns the connection address. + GetAddr() string + + // IsTLSEnabled returns true if TLS is enabled. + IsTLSEnabled() bool + + // GetProtocol returns the protocol version. + GetProtocol() int + + // GetPoolSize returns the connection pool size. + GetPoolSize() int + + // NewDialer returns a new dialer function for the connection. 
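hashtag.Present, added above, is deliberately stricter than "contains braces": the tag must be non-empty for a cluster slot to be derivable. A self-contained mirror of its logic with a few cases (the helper name here is local, since the real function lives in an internal package):

package main

import (
	"fmt"
	"strings"
)

// present mirrors internal/hashtag.Present: true iff the key holds a non-empty
// "{...}" hash tag, i.e. a '}' appears at least one byte after the first '{'.
func present(key string) bool {
	if key == "" {
		return false
	}
	if s := strings.IndexByte(key, '{'); s > -1 {
		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
			return true
		}
	}
	return false
}

func main() {
	for _, k := range []string{"{user:42}:posts", "{}:posts", "plainkey", "{open"} {
		fmt.Printf("%-18q -> %v\n", k, present(k)) // true, false, false, false
	}
}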
+ NewDialer() func(context.Context) (net.Conn, error) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/log.go b/vendor/github.com/redis/go-redis/v9/internal/log.go index c8b9213de..0bfffc311 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/log.go +++ b/vendor/github.com/redis/go-redis/v9/internal/log.go @@ -7,20 +7,73 @@ import ( "os" ) +// TODO (ned): Revisit logging +// Add more standardized approach with log levels and configurability + type Logging interface { Printf(ctx context.Context, format string, v ...interface{}) } -type logger struct { +type DefaultLogger struct { log *log.Logger } -func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) { +func (l *DefaultLogger) Printf(ctx context.Context, format string, v ...interface{}) { _ = l.log.Output(2, fmt.Sprintf(format, v...)) } +func NewDefaultLogger() Logging { + return &DefaultLogger{ + log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile), + } +} + // Logger calls Output to print to the stderr. // Arguments are handled in the manner of fmt.Print. -var Logger Logging = &logger{ - log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile), +var Logger Logging = NewDefaultLogger() + +var LogLevel LogLevelT = LogLevelError + +// LogLevelT represents the logging level +type LogLevelT int + +// Log level constants for the entire go-redis library +const ( + LogLevelError LogLevelT = iota // 0 - errors only + LogLevelWarn // 1 - warnings and errors + LogLevelInfo // 2 - info, warnings, and errors + LogLevelDebug // 3 - debug, info, warnings, and errors +) + +// String returns the string representation of the log level +func (l LogLevelT) String() string { + switch l { + case LogLevelError: + return "ERROR" + case LogLevelWarn: + return "WARN" + case LogLevelInfo: + return "INFO" + case LogLevelDebug: + return "DEBUG" + default: + return "UNKNOWN" + } +} + +// IsValid returns true if the log level is valid +func (l LogLevelT) IsValid() bool { + return l >= LogLevelError && l <= LogLevelDebug +} + +func (l LogLevelT) WarnOrAbove() bool { + return l >= LogLevelWarn +} + +func (l LogLevelT) InfoOrAbove() bool { + return l >= LogLevelInfo +} + +func (l LogLevelT) DebugOrAbove() bool { + return l >= LogLevelDebug } diff --git a/vendor/github.com/redis/go-redis/v9/internal/maintnotifications/logs/log_messages.go b/vendor/github.com/redis/go-redis/v9/internal/maintnotifications/logs/log_messages.go new file mode 100644 index 000000000..34cb1692d --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/maintnotifications/logs/log_messages.go @@ -0,0 +1,625 @@ +package logs + +import ( + "encoding/json" + "fmt" + "regexp" + + "github.com/redis/go-redis/v9/internal" +) + +// appendJSONIfDebug appends JSON data to a message only if the global log level is Debug +func appendJSONIfDebug(message string, data map[string]interface{}) string { + if internal.LogLevel.DebugOrAbove() { + jsonData, _ := json.Marshal(data) + return fmt.Sprintf("%s %s", message, string(jsonData)) + } + return message +} + +const ( + // ======================================== + // CIRCUIT_BREAKER.GO - Circuit breaker management + // ======================================== + CircuitBreakerTransitioningToHalfOpenMessage = "circuit breaker transitioning to half-open" + CircuitBreakerOpenedMessage = "circuit breaker opened" + CircuitBreakerReopenedMessage = "circuit breaker reopened" + CircuitBreakerClosedMessage = "circuit breaker closed" + CircuitBreakerCleanupMessage = "circuit breaker cleanup" + 
CircuitBreakerOpenMessage = "circuit breaker is open, failing fast" + + // ======================================== + // CONFIG.GO - Configuration and debug + // ======================================== + DebugLoggingEnabledMessage = "debug logging enabled" + ConfigDebugMessage = "config debug" + + // ======================================== + // ERRORS.GO - Error message constants + // ======================================== + InvalidRelaxedTimeoutErrorMessage = "relaxed timeout must be greater than 0" + InvalidHandoffTimeoutErrorMessage = "handoff timeout must be greater than 0" + InvalidHandoffWorkersErrorMessage = "MaxWorkers must be greater than or equal to 0" + InvalidHandoffQueueSizeErrorMessage = "handoff queue size must be greater than 0" + InvalidPostHandoffRelaxedDurationErrorMessage = "post-handoff relaxed duration must be greater than or equal to 0" + InvalidEndpointTypeErrorMessage = "invalid endpoint type" + InvalidMaintNotificationsErrorMessage = "invalid maintenance notifications setting (must be 'disabled', 'enabled', or 'auto')" + InvalidHandoffRetriesErrorMessage = "MaxHandoffRetries must be between 1 and 10" + InvalidClientErrorMessage = "invalid client type" + InvalidNotificationErrorMessage = "invalid notification format" + MaxHandoffRetriesReachedErrorMessage = "max handoff retries reached" + HandoffQueueFullErrorMessage = "handoff queue is full, cannot queue new handoff requests - consider increasing HandoffQueueSize or MaxWorkers in configuration" + InvalidCircuitBreakerFailureThresholdErrorMessage = "circuit breaker failure threshold must be >= 1" + InvalidCircuitBreakerResetTimeoutErrorMessage = "circuit breaker reset timeout must be >= 0" + InvalidCircuitBreakerMaxRequestsErrorMessage = "circuit breaker max requests must be >= 1" + ConnectionMarkedForHandoffErrorMessage = "connection marked for handoff" + ConnectionInvalidHandoffStateErrorMessage = "connection is in invalid state for handoff" + ShutdownErrorMessage = "shutdown" + CircuitBreakerOpenErrorMessage = "circuit breaker is open, failing fast" + + // ======================================== + // EXAMPLE_HOOKS.GO - Example metrics hooks + // ======================================== + MetricsHookProcessingNotificationMessage = "metrics hook processing" + MetricsHookRecordedErrorMessage = "metrics hook recorded error" + + // ======================================== + // HANDOFF_WORKER.GO - Connection handoff processing + // ======================================== + HandoffStartedMessage = "handoff started" + HandoffFailedMessage = "handoff failed" + ConnectionNotMarkedForHandoffMessage = "is not marked for handoff and has no retries" + ConnectionNotMarkedForHandoffErrorMessage = "is not marked for handoff" + HandoffRetryAttemptMessage = "Performing handoff" + CannotQueueHandoffForRetryMessage = "can't queue handoff for retry" + HandoffQueueFullMessage = "handoff queue is full" + FailedToDialNewEndpointMessage = "failed to dial new endpoint" + ApplyingRelaxedTimeoutDueToPostHandoffMessage = "applying relaxed timeout due to post-handoff" + HandoffSuccessMessage = "handoff succeeded" + RemovingConnectionFromPoolMessage = "removing connection from pool" + NoPoolProvidedMessageCannotRemoveMessage = "no pool provided, cannot remove connection, closing it" + WorkerExitingDueToShutdownMessage = "worker exiting due to shutdown" + WorkerExitingDueToShutdownWhileProcessingMessage = "worker exiting due to shutdown while processing request" + WorkerPanicRecoveredMessage = "worker panic recovered" + 
WorkerExitingDueToInactivityTimeoutMessage       = "worker exiting due to inactivity timeout"
+	ReachedMaxHandoffRetriesMessage                  = "reached max handoff retries"
+
+	// ========================================
+	// MANAGER.GO - Moving operation tracking and handler registration
+	// ========================================
+	DuplicateMovingOperationMessage  = "duplicate MOVING operation ignored"
+	TrackingMovingOperationMessage   = "tracking MOVING operation"
+	UntrackingMovingOperationMessage = "untracking MOVING operation"
+	OperationNotTrackedMessage       = "operation not tracked"
+	FailedToRegisterHandlerMessage   = "failed to register handler"
+
+	// ========================================
+	// HOOKS.GO - Notification processing hooks
+	// ========================================
+	ProcessingNotificationMessage          = "processing notification started"
+	ProcessingNotificationFailedMessage    = "processing notification failed"
+	ProcessingNotificationSucceededMessage = "processing notification succeeded"
+
+	// ========================================
+	// POOL_HOOK.GO - Pool connection management
+	// ========================================
+	FailedToQueueHandoffMessage = "failed to queue handoff"
+	MarkedForHandoffMessage     = "connection marked for handoff"
+
+	// ========================================
+	// PUSH_NOTIFICATION_HANDLER.GO - Push notification validation and processing
+	// ========================================
+	InvalidNotificationFormatMessage              = "invalid notification format"
+	InvalidNotificationTypeFormatMessage          = "invalid notification type format"
+	InvalidSeqIDInMovingNotificationMessage       = "invalid seqID in MOVING notification"
+	InvalidTimeSInMovingNotificationMessage       = "invalid timeS in MOVING notification"
+	InvalidNewEndpointInMovingNotificationMessage = "invalid newEndpoint in MOVING notification"
+	NoConnectionInHandlerContextMessage           = "no connection in handler context"
+	InvalidConnectionTypeInHandlerContextMessage  = "invalid connection type in handler context"
+	SchedulingHandoffToCurrentEndpointMessage     = "scheduling handoff to current endpoint"
+	RelaxedTimeoutDueToNotificationMessage        = "applying relaxed timeout due to notification"
+	UnrelaxedTimeoutMessage                       = "clearing relaxed timeout"
+	ManagerNotInitializedMessage                  = "manager not initialized"
+	FailedToMarkForHandoffMessage                 = "failed to mark connection for handoff"
+
+	// ========================================
+	// used in pool/conn
+	// ========================================
+	UnrelaxedTimeoutAfterDeadlineMessage = "clearing relaxed timeout after deadline"
+)
+
+func HandoffStarted(connID uint64, newEndpoint string) string {
+	message := fmt.Sprintf("conn[%d] %s to %s", connID, HandoffStartedMessage, newEndpoint)
+	return appendJSONIfDebug(message, map[string]interface{}{
+		"connID":   connID,
+		"endpoint": newEndpoint,
+	})
+}
+
+func HandoffFailed(connID uint64, newEndpoint string, attempt int, maxAttempts int, err error) string {
+	message := fmt.Sprintf("conn[%d] %s to %s (attempt %d/%d): %v", connID, HandoffFailedMessage, newEndpoint, attempt, maxAttempts, err)
+	return appendJSONIfDebug(message, map[string]interface{}{
+		"connID":      connID,
+		"endpoint":    newEndpoint,
+		"attempt":     attempt,
+		"maxAttempts": maxAttempts,
+		"error":       err.Error(),
+	})
+}
+
+func HandoffSucceeded(connID uint64, newEndpoint string) string {
+	message := fmt.Sprintf("conn[%d] %s to %s", connID, HandoffSuccessMessage, newEndpoint)
+	return appendJSONIfDebug(message, map[string]interface{}{
+		"connID":   connID,
+		"endpoint": newEndpoint,
+	})
+}
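The builders above all funnel through appendJSONIfDebug: every message keeps a greppable conn[id] prefix, and machine-readable JSON attributes are appended only when the global level (see LogLevelT in internal/log.go) is debug. A standalone sketch of that shape:

package main

import (
	"encoding/json"
	"fmt"
)

type logLevel int

const (
	levelError logLevel = iota
	levelWarn
	levelInfo
	levelDebug
)

var level = levelDebug // mirrors internal.LogLevel; the upstream default is error

// appendJSONIfDebug mirrors the helper above: human-readable prefix always,
// JSON attributes only at debug level.
func appendJSONIfDebug(msg string, data map[string]interface{}) string {
	if level >= levelDebug {
		b, _ := json.Marshal(data)
		return fmt.Sprintf("%s %s", msg, b)
	}
	return msg
}

func main() {
	fmt.Println(appendJSONIfDebug(
		"conn[7] handoff started to new-host:6379",
		map[string]interface{}{"connID": 7, "endpoint": "new-host:6379"},
	))
}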
+ +// Timeout-related log functions +func RelaxedTimeoutDueToNotification(connID uint64, notificationType string, timeout interface{}) string { + message := fmt.Sprintf("conn[%d] %s %s (%v)", connID, RelaxedTimeoutDueToNotificationMessage, notificationType, timeout) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "notificationType": notificationType, + "timeout": fmt.Sprintf("%v", timeout), + }) +} + +func UnrelaxedTimeout(connID uint64) string { + message := fmt.Sprintf("conn[%d] %s", connID, UnrelaxedTimeoutMessage) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + }) +} + +func UnrelaxedTimeoutAfterDeadline(connID uint64) string { + message := fmt.Sprintf("conn[%d] %s", connID, UnrelaxedTimeoutAfterDeadlineMessage) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + }) +} + +// Handoff queue and marking functions +func HandoffQueueFull(queueLen, queueCap int) string { + message := fmt.Sprintf("%s (%d/%d), cannot queue new handoff requests - consider increasing HandoffQueueSize or MaxWorkers in configuration", HandoffQueueFullMessage, queueLen, queueCap) + return appendJSONIfDebug(message, map[string]interface{}{ + "queueLen": queueLen, + "queueCap": queueCap, + }) +} + +func FailedToQueueHandoff(connID uint64, err error) string { + message := fmt.Sprintf("conn[%d] %s: %v", connID, FailedToQueueHandoffMessage, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "error": err.Error(), + }) +} + +func FailedToMarkForHandoff(connID uint64, err error) string { + message := fmt.Sprintf("conn[%d] %s: %v", connID, FailedToMarkForHandoffMessage, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "error": err.Error(), + }) +} + +func FailedToDialNewEndpoint(connID uint64, endpoint string, err error) string { + message := fmt.Sprintf("conn[%d] %s %s: %v", connID, FailedToDialNewEndpointMessage, endpoint, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "endpoint": endpoint, + "error": err.Error(), + }) +} + +func ReachedMaxHandoffRetries(connID uint64, endpoint string, maxRetries int) string { + message := fmt.Sprintf("conn[%d] %s to %s (max retries: %d)", connID, ReachedMaxHandoffRetriesMessage, endpoint, maxRetries) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "endpoint": endpoint, + "maxRetries": maxRetries, + }) +} + +// Notification processing functions +func ProcessingNotification(connID uint64, seqID int64, notificationType string, notification interface{}) string { + message := fmt.Sprintf("conn[%d] seqID[%d] %s %s: %v", connID, seqID, ProcessingNotificationMessage, notificationType, notification) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "seqID": seqID, + "notificationType": notificationType, + "notification": fmt.Sprintf("%v", notification), + }) +} + +func ProcessingNotificationFailed(connID uint64, notificationType string, err error, notification interface{}) string { + message := fmt.Sprintf("conn[%d] %s %s: %v - %v", connID, ProcessingNotificationFailedMessage, notificationType, err, notification) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "notificationType": notificationType, + "error": err.Error(), + "notification": fmt.Sprintf("%v", notification), + }) +} + +func ProcessingNotificationSucceeded(connID uint64, notificationType string) string { + message := 
fmt.Sprintf("conn[%d] %s %s", connID, ProcessingNotificationSucceededMessage, notificationType) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "notificationType": notificationType, + }) +} + +// Moving operation tracking functions +func DuplicateMovingOperation(connID uint64, endpoint string, seqID int64) string { + message := fmt.Sprintf("conn[%d] %s for %s seqID[%d]", connID, DuplicateMovingOperationMessage, endpoint, seqID) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "endpoint": endpoint, + "seqID": seqID, + }) +} + +func TrackingMovingOperation(connID uint64, endpoint string, seqID int64) string { + message := fmt.Sprintf("conn[%d] %s for %s seqID[%d]", connID, TrackingMovingOperationMessage, endpoint, seqID) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "endpoint": endpoint, + "seqID": seqID, + }) +} + +func UntrackingMovingOperation(connID uint64, seqID int64) string { + message := fmt.Sprintf("conn[%d] %s seqID[%d]", connID, UntrackingMovingOperationMessage, seqID) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "seqID": seqID, + }) +} + +func OperationNotTracked(connID uint64, seqID int64) string { + message := fmt.Sprintf("conn[%d] %s seqID[%d]", connID, OperationNotTrackedMessage, seqID) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "seqID": seqID, + }) +} + +// Connection pool functions +func RemovingConnectionFromPool(connID uint64, reason error) string { + message := fmt.Sprintf("conn[%d] %s due to: %v", connID, RemovingConnectionFromPoolMessage, reason) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "reason": reason.Error(), + }) +} + +func NoPoolProvidedCannotRemove(connID uint64, reason error) string { + message := fmt.Sprintf("conn[%d] %s due to: %v", connID, NoPoolProvidedMessageCannotRemoveMessage, reason) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "reason": reason.Error(), + }) +} + +// Circuit breaker functions +func CircuitBreakerOpen(connID uint64, endpoint string) string { + message := fmt.Sprintf("conn[%d] %s for %s", connID, CircuitBreakerOpenMessage, endpoint) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "endpoint": endpoint, + }) +} + +// Additional handoff functions for specific cases +func ConnectionNotMarkedForHandoff(connID uint64) string { + message := fmt.Sprintf("conn[%d] %s", connID, ConnectionNotMarkedForHandoffMessage) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + }) +} + +func ConnectionNotMarkedForHandoffError(connID uint64) string { + return fmt.Sprintf("conn[%d] %s", connID, ConnectionNotMarkedForHandoffErrorMessage) +} + +func HandoffRetryAttempt(connID uint64, retries int, newEndpoint string, oldEndpoint string) string { + message := fmt.Sprintf("conn[%d] Retry %d: %s to %s(was %s)", connID, retries, HandoffRetryAttemptMessage, newEndpoint, oldEndpoint) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "retries": retries, + "newEndpoint": newEndpoint, + "oldEndpoint": oldEndpoint, + }) +} + +func CannotQueueHandoffForRetry(err error) string { + message := fmt.Sprintf("%s: %v", CannotQueueHandoffForRetryMessage, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "error": err.Error(), + }) +} + +// Validation and error functions +func InvalidNotificationFormat(notification 
interface{}) string { + message := fmt.Sprintf("%s: %v", InvalidNotificationFormatMessage, notification) + return appendJSONIfDebug(message, map[string]interface{}{ + "notification": fmt.Sprintf("%v", notification), + }) +} + +func InvalidNotificationTypeFormat(notificationType interface{}) string { + message := fmt.Sprintf("%s: %v", InvalidNotificationTypeFormatMessage, notificationType) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": fmt.Sprintf("%v", notificationType), + }) +} + +// InvalidNotification creates a log message for invalid notifications of any type +func InvalidNotification(notificationType string, notification interface{}) string { + message := fmt.Sprintf("invalid %s notification: %v", notificationType, notification) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + "notification": fmt.Sprintf("%v", notification), + }) +} + +func InvalidSeqIDInMovingNotification(seqID interface{}) string { + message := fmt.Sprintf("%s: %v", InvalidSeqIDInMovingNotificationMessage, seqID) + return appendJSONIfDebug(message, map[string]interface{}{ + "seqID": fmt.Sprintf("%v", seqID), + }) +} + +func InvalidTimeSInMovingNotification(timeS interface{}) string { + message := fmt.Sprintf("%s: %v", InvalidTimeSInMovingNotificationMessage, timeS) + return appendJSONIfDebug(message, map[string]interface{}{ + "timeS": fmt.Sprintf("%v", timeS), + }) +} + +func InvalidNewEndpointInMovingNotification(newEndpoint interface{}) string { + message := fmt.Sprintf("%s: %v", InvalidNewEndpointInMovingNotificationMessage, newEndpoint) + return appendJSONIfDebug(message, map[string]interface{}{ + "newEndpoint": fmt.Sprintf("%v", newEndpoint), + }) +} + +func NoConnectionInHandlerContext(notificationType string) string { + message := fmt.Sprintf("%s for %s notification", NoConnectionInHandlerContextMessage, notificationType) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + }) +} + +func InvalidConnectionTypeInHandlerContext(notificationType string, conn interface{}, handlerCtx interface{}) string { + message := fmt.Sprintf("%s for %s notification - %T %#v", InvalidConnectionTypeInHandlerContextMessage, notificationType, conn, handlerCtx) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + "connType": fmt.Sprintf("%T", conn), + }) +} + +func SchedulingHandoffToCurrentEndpoint(connID uint64, seconds float64) string { + message := fmt.Sprintf("conn[%d] %s in %v seconds", connID, SchedulingHandoffToCurrentEndpointMessage, seconds) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "seconds": seconds, + }) +} + +func ManagerNotInitialized() string { + return appendJSONIfDebug(ManagerNotInitializedMessage, map[string]interface{}{}) +} + +func FailedToRegisterHandler(notificationType string, err error) string { + message := fmt.Sprintf("%s for %s: %v", FailedToRegisterHandlerMessage, notificationType, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + "error": err.Error(), + }) +} + +func ShutdownError() string { + return appendJSONIfDebug(ShutdownErrorMessage, map[string]interface{}{}) +} + +// Configuration validation error functions +func InvalidRelaxedTimeoutError() string { + return appendJSONIfDebug(InvalidRelaxedTimeoutErrorMessage, map[string]interface{}{}) +} + +func InvalidHandoffTimeoutError() string { + return 
appendJSONIfDebug(InvalidHandoffTimeoutErrorMessage, map[string]interface{}{}) +} + +func InvalidHandoffWorkersError() string { + return appendJSONIfDebug(InvalidHandoffWorkersErrorMessage, map[string]interface{}{}) +} + +func InvalidHandoffQueueSizeError() string { + return appendJSONIfDebug(InvalidHandoffQueueSizeErrorMessage, map[string]interface{}{}) +} + +func InvalidPostHandoffRelaxedDurationError() string { + return appendJSONIfDebug(InvalidPostHandoffRelaxedDurationErrorMessage, map[string]interface{}{}) +} + +func InvalidEndpointTypeError() string { + return appendJSONIfDebug(InvalidEndpointTypeErrorMessage, map[string]interface{}{}) +} + +func InvalidMaintNotificationsError() string { + return appendJSONIfDebug(InvalidMaintNotificationsErrorMessage, map[string]interface{}{}) +} + +func InvalidHandoffRetriesError() string { + return appendJSONIfDebug(InvalidHandoffRetriesErrorMessage, map[string]interface{}{}) +} + +func InvalidClientError() string { + return appendJSONIfDebug(InvalidClientErrorMessage, map[string]interface{}{}) +} + +func InvalidNotificationError() string { + return appendJSONIfDebug(InvalidNotificationErrorMessage, map[string]interface{}{}) +} + +func MaxHandoffRetriesReachedError() string { + return appendJSONIfDebug(MaxHandoffRetriesReachedErrorMessage, map[string]interface{}{}) +} + +func HandoffQueueFullError() string { + return appendJSONIfDebug(HandoffQueueFullErrorMessage, map[string]interface{}{}) +} + +func InvalidCircuitBreakerFailureThresholdError() string { + return appendJSONIfDebug(InvalidCircuitBreakerFailureThresholdErrorMessage, map[string]interface{}{}) +} + +func InvalidCircuitBreakerResetTimeoutError() string { + return appendJSONIfDebug(InvalidCircuitBreakerResetTimeoutErrorMessage, map[string]interface{}{}) +} + +func InvalidCircuitBreakerMaxRequestsError() string { + return appendJSONIfDebug(InvalidCircuitBreakerMaxRequestsErrorMessage, map[string]interface{}{}) +} + +// Configuration and debug functions +func DebugLoggingEnabled() string { + return appendJSONIfDebug(DebugLoggingEnabledMessage, map[string]interface{}{}) +} + +func ConfigDebug(config interface{}) string { + message := fmt.Sprintf("%s: %+v", ConfigDebugMessage, config) + return appendJSONIfDebug(message, map[string]interface{}{ + "config": fmt.Sprintf("%+v", config), + }) +} + +// Handoff worker functions +func WorkerExitingDueToShutdown() string { + return appendJSONIfDebug(WorkerExitingDueToShutdownMessage, map[string]interface{}{}) +} + +func WorkerExitingDueToShutdownWhileProcessing() string { + return appendJSONIfDebug(WorkerExitingDueToShutdownWhileProcessingMessage, map[string]interface{}{}) +} + +func WorkerPanicRecovered(panicValue interface{}) string { + message := fmt.Sprintf("%s: %v", WorkerPanicRecoveredMessage, panicValue) + return appendJSONIfDebug(message, map[string]interface{}{ + "panic": fmt.Sprintf("%v", panicValue), + }) +} + +func WorkerExitingDueToInactivityTimeout(timeout interface{}) string { + message := fmt.Sprintf("%s (%v)", WorkerExitingDueToInactivityTimeoutMessage, timeout) + return appendJSONIfDebug(message, map[string]interface{}{ + "timeout": fmt.Sprintf("%v", timeout), + }) +} + +func ApplyingRelaxedTimeoutDueToPostHandoff(connID uint64, timeout interface{}, until string) string { + message := fmt.Sprintf("conn[%d] %s (%v) until %s", connID, ApplyingRelaxedTimeoutDueToPostHandoffMessage, timeout, until) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + "timeout": fmt.Sprintf("%v", timeout), + "until": until, 
+ }) +} + +// Example hooks functions +func MetricsHookProcessingNotification(notificationType string, connID uint64) string { + message := fmt.Sprintf("%s %s notification on conn[%d]", MetricsHookProcessingNotificationMessage, notificationType, connID) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + "connID": connID, + }) +} + +func MetricsHookRecordedError(notificationType string, connID uint64, err error) string { + message := fmt.Sprintf("%s for %s notification on conn[%d]: %v", MetricsHookRecordedErrorMessage, notificationType, connID, err) + return appendJSONIfDebug(message, map[string]interface{}{ + "notificationType": notificationType, + "connID": connID, + "error": err.Error(), + }) +} + +// Pool hook functions +func MarkedForHandoff(connID uint64) string { + message := fmt.Sprintf("conn[%d] %s", connID, MarkedForHandoffMessage) + return appendJSONIfDebug(message, map[string]interface{}{ + "connID": connID, + }) +} + +// Circuit breaker additional functions +func CircuitBreakerTransitioningToHalfOpen(endpoint string) string { + message := fmt.Sprintf("%s for %s", CircuitBreakerTransitioningToHalfOpenMessage, endpoint) + return appendJSONIfDebug(message, map[string]interface{}{ + "endpoint": endpoint, + }) +} + +func CircuitBreakerOpened(endpoint string, failures int64) string { + message := fmt.Sprintf("%s for endpoint %s after %d failures", CircuitBreakerOpenedMessage, endpoint, failures) + return appendJSONIfDebug(message, map[string]interface{}{ + "endpoint": endpoint, + "failures": failures, + }) +} + +func CircuitBreakerReopened(endpoint string) string { + message := fmt.Sprintf("%s for endpoint %s due to failure in half-open state", CircuitBreakerReopenedMessage, endpoint) + return appendJSONIfDebug(message, map[string]interface{}{ + "endpoint": endpoint, + }) +} + +func CircuitBreakerClosed(endpoint string, successes int64) string { + message := fmt.Sprintf("%s for endpoint %s after %d successful requests", CircuitBreakerClosedMessage, endpoint, successes) + return appendJSONIfDebug(message, map[string]interface{}{ + "endpoint": endpoint, + "successes": successes, + }) +} + +func CircuitBreakerCleanup(removed int, total int) string { + message := fmt.Sprintf("%s removed %d/%d entries", CircuitBreakerCleanupMessage, removed, total) + return appendJSONIfDebug(message, map[string]interface{}{ + "removed": removed, + "total": total, + }) +} + +// ExtractDataFromLogMessage extracts structured data from maintnotifications log messages +// Returns a map containing the parsed key-value pairs from the structured data section +// Example: "conn[123] handoff started to localhost:6379 {"connID":123,"endpoint":"localhost:6379"}" +// Returns: map[string]interface{}{"connID": 123, "endpoint": "localhost:6379"} +func ExtractDataFromLogMessage(logMessage string) map[string]interface{} { + result := make(map[string]interface{}) + + // Find the JSON data section at the end of the message + re := regexp.MustCompile(`(\{.*\})$`) + matches := re.FindStringSubmatch(logMessage) + if len(matches) < 2 { + return result + } + + jsonStr := matches[1] + if jsonStr == "" { + return result + } + + // Parse the JSON directly + var jsonResult map[string]interface{} + if err := json.Unmarshal([]byte(jsonStr), &jsonResult); err == nil { + return jsonResult + } + + // If JSON parsing fails, return empty map + return result +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go 
index a5233fb88..95d83bfde 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go @@ -1,82 +1,803 @@ +// Package pool implements connection pool management for the Redis client. package pool import ( "bufio" "context" + "errors" + "fmt" "net" + "sync" "sync/atomic" - "syscall" "time" + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" "github.com/redis/go-redis/v9/internal/proto" ) var noDeadline = time.Time{} +// Preallocated errors for hot paths to avoid allocations +var ( + errAlreadyMarkedForHandoff = errors.New("connection is already marked for handoff") + errNotMarkedForHandoff = errors.New("connection was not marked for handoff") + errHandoffStateChanged = errors.New("handoff state changed during marking") + errConnectionNotAvailable = errors.New("redis: connection not available") + errConnNotAvailableForWrite = errors.New("redis: connection not available for write operation") +) + +// getCachedTimeNs returns the current time in nanoseconds. +// This function previously used a global cache updated by a background goroutine, +// but that caused unnecessary CPU usage when the client was idle (ticker waking up +// the scheduler every 50ms). We now use time.Now() directly, which is fast enough +// on modern systems (vDSO on Linux) and only adds ~1-2% overhead in extreme +// high-concurrency benchmarks while eliminating idle CPU usage. +func getCachedTimeNs() int64 { + return time.Now().UnixNano() +} + +// GetCachedTimeNs returns the current time in nanoseconds. +// Exported for use by other packages that need fast time access. +func GetCachedTimeNs() int64 { + return getCachedTimeNs() +} + +// Global atomic counter for connection IDs +var connIDCounter uint64 + +// HandoffState represents the atomic state for connection handoffs. +// This struct is stored atomically to prevent race conditions between +// checking handoff status and reading handoff parameters +type HandoffState struct { + ShouldHandoff bool // Whether connection should be handed off + Endpoint string // New endpoint for handoff + SeqID int64 // Sequence ID from MOVING notification +} + +// atomicNetConn is a wrapper to ensure consistent typing in atomic.Value +type atomicNetConn struct { + conn net.Conn +} + +// generateConnID generates a fast unique identifier for a connection with zero allocations +func generateConnID() uint64 { + return atomic.AddUint64(&connIDCounter, 1) +} + type Conn struct { - usedAt int64 // atomic - netConn net.Conn + // Connection identifier for unique tracking + id uint64 - // for checking the health status of the connection, it may be nil.
- sysConn syscall.Conn + usedAt atomic.Int64 + lastPutAt atomic.Int64 + + // Lock-free netConn access using atomic.Value + // Contains *atomicNetConn wrapper, accessed atomically for better performance + netConnAtomic atomic.Value // stores *atomicNetConn rd *proto.Reader bw *bufio.Writer wr *proto.Writer - Inited bool + // Lightweight mutex to protect reader operations during handoff + // Only used for the brief period during SetNetConn and HasBufferedData/PeekReplyTypeSafe + readerMu sync.RWMutex + + // State machine for connection state management + // Replaces: usable, Inited, used + // Provides thread-safe state transitions with FIFO waiting queue + // States: CREATED → INITIALIZING → IDLE ⇄ IN_USE + // ↓ + // UNUSABLE (handoff/reauth) + // ↓ + // IDLE/CLOSED + stateMachine *ConnStateMachine + + // Handoff metadata - managed separately from state machine + // These are atomic for lock-free access during handoff operations + handoffStateAtomic atomic.Value // stores *HandoffState + handoffRetriesAtomic atomic.Uint32 // retry counter + pooled bool + pubsub bool + closed atomic.Bool createdAt time.Time + expiresAt time.Time + + // maintenanceNotifications upgrade support: relaxed timeouts during migrations/failovers + + // Using atomic operations for lock-free access to avoid mutex contention + relaxedReadTimeoutNs atomic.Int64 // time.Duration as nanoseconds + relaxedWriteTimeoutNs atomic.Int64 // time.Duration as nanoseconds + relaxedDeadlineNs atomic.Int64 // time.Time as nanoseconds since epoch + + // Counter to track multiple relaxed timeout setters if we have nested calls + // will be decremented when ClearRelaxedTimeout is called or deadline is reached + // if counter reaches 0, we clear the relaxed timeouts + relaxedCounter atomic.Int32 + + // Connection initialization function for reconnections + initConnFunc func(context.Context, *Conn) error + + onClose func() error } func NewConn(netConn net.Conn) *Conn { + return NewConnWithBufferSize(netConn, proto.DefaultBufferSize, proto.DefaultBufferSize) +} + +func NewConnWithBufferSize(netConn net.Conn, readBufSize, writeBufSize int) *Conn { + now := time.Now() cn := &Conn{ - netConn: netConn, - createdAt: time.Now(), + createdAt: now, + id: generateConnID(), // Generate unique ID for this connection + stateMachine: NewConnStateMachine(), + } + + // Use specified buffer sizes, or fall back to 32KiB defaults if 0 + if readBufSize > 0 { + cn.rd = proto.NewReaderSize(netConn, readBufSize) + } else { + cn.rd = proto.NewReader(netConn) // Uses 32KiB default + } + + if writeBufSize > 0 { + cn.bw = bufio.NewWriterSize(netConn, writeBufSize) + } else { + cn.bw = bufio.NewWriterSize(netConn, proto.DefaultBufferSize) } - cn.rd = proto.NewReader(netConn) - cn.bw = bufio.NewWriter(netConn) + + // Store netConn atomically for lock-free access using wrapper + cn.netConnAtomic.Store(&atomicNetConn{conn: netConn}) + cn.wr = proto.NewWriter(cn.bw) - cn.SetUsedAt(time.Now()) - cn.setSysConn() + cn.SetUsedAt(now) + // Initialize handoff state atomically + initialHandoffState := &HandoffState{ + ShouldHandoff: false, + Endpoint: "", + SeqID: 0, + } + cn.handoffStateAtomic.Store(initialHandoffState) return cn } func (cn *Conn) UsedAt() time.Time { - unix := atomic.LoadInt64(&cn.usedAt) - return time.Unix(unix, 0) + return time.Unix(0, cn.usedAt.Load()) } - func (cn *Conn) SetUsedAt(tm time.Time) { - atomic.StoreInt64(&cn.usedAt, tm.Unix()) + cn.usedAt.Store(tm.UnixNano()) +} + +func (cn *Conn) UsedAtNs() int64 { + return cn.usedAt.Load() +} +func (cn 
*Conn) SetUsedAtNs(ns int64) { + cn.usedAt.Store(ns) +} + +func (cn *Conn) LastPutAtNs() int64 { + return cn.lastPutAt.Load() +} +func (cn *Conn) SetLastPutAtNs(ns int64) { + cn.lastPutAt.Store(ns) +} + +// Backward-compatible wrapper methods for state machine +// These maintain the existing API while using the new state machine internally + +// CompareAndSwapUsable atomically compares and swaps the usable flag (lock-free). +// +// This is used by background operations (handoff, re-auth) to acquire exclusive +// access to a connection. The operation sets usable to false, preventing the pool +// from returning the connection to clients. +// +// Returns true if the swap was successful (old value matched), false otherwise. +// +// Implementation note: This is a compatibility wrapper around the state machine. +// It checks if the current state is "usable" (IDLE or IN_USE) and transitions accordingly. +// Deprecated: Use GetStateMachine().TryTransition() directly for better state management. +func (cn *Conn) CompareAndSwapUsable(old, new bool) bool { + currentState := cn.stateMachine.GetState() + + // Check if current state matches the "old" usable value + currentUsable := (currentState == StateIdle || currentState == StateInUse) + if currentUsable != old { + return false + } + + // If we're trying to set to the same value, succeed immediately + if old == new { + return true + } + + // Transition based on new value + if new { + // Trying to make usable - transition from UNUSABLE to IDLE + // This should only work from UNUSABLE or INITIALIZING states + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition( + validFromInitializingOrUnusable, + StateIdle, + ) + return err == nil + } + // Trying to make unusable - transition from IDLE to UNUSABLE + // This is typically for acquiring the connection for background operations + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition( + validFromIdle, + StateUnusable, + ) + return err == nil +} + +// IsUsable returns true if the connection is safe to use for new commands (lock-free). +// +// A connection is "usable" when it's in a stable state and can be returned to clients. +// It becomes unusable during: +// - Handoff operations (network connection replacement) +// - Re-authentication (credential updates) +// - Other background operations that need exclusive access +// +// Note: CREATED state is considered usable because new connections need to pass OnGet() hook +// before initialization. The initialization happens after OnGet() in the client code. +func (cn *Conn) IsUsable() bool { + state := cn.stateMachine.GetState() + // CREATED, IDLE, and IN_USE states are considered usable + // CREATED: new connection, not yet initialized (will be initialized by client) + // IDLE: initialized and ready to be acquired + // IN_USE: usable but currently acquired by someone + return state == StateCreated || state == StateIdle || state == StateInUse +} + +// SetUsable sets the usable flag for the connection (lock-free). +// +// Deprecated: Use GetStateMachine().Transition() directly for better state management. +// This method is kept for backwards compatibility. +// +// This should be called to mark a connection as usable after initialization or +// to release it after a background operation completes. +// +// Prefer CompareAndSwapUsable() when acquiring exclusive access to avoid race conditions. +// Deprecated: Use GetStateMachine().Transition() directly for better state management. 
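+//
+// Editor's sketch (hedged, not part of the original source): the state-machine
+// calls this wrapper maps to are:
+//
+//	cn.GetStateMachine().Transition(StateIdle)     // SetUsable(true)
+//	cn.GetStateMachine().Transition(StateUnusable) // SetUsable(false)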
+func (cn *Conn) SetUsable(usable bool) { + if usable { + // Transition to IDLE state (ready to be acquired) + cn.stateMachine.Transition(StateIdle) + } else { + // Transition to UNUSABLE state (for background operations) + cn.stateMachine.Transition(StateUnusable) + } +} + +// IsInited returns true if the connection has been initialized. +// This is a backward-compatible wrapper around the state machine. +func (cn *Conn) IsInited() bool { + state := cn.stateMachine.GetState() + // Connection is initialized if it's in IDLE or any post-initialization state + return state != StateCreated && state != StateInitializing && state != StateClosed +} + +// Used - State machine based implementation + +// CompareAndSwapUsed atomically compares and swaps the used flag (lock-free). +// This method is kept for backwards compatibility. +// +// This is the preferred method for acquiring a connection from the pool, as it +// ensures that only one goroutine marks the connection as used. +// +// Implementation: Uses state machine transitions IDLE ⇄ IN_USE +// +// Returns true if the swap was successful (old value matched), false otherwise. +// Deprecated: Use GetStateMachine().TryTransition() directly for better state management. +func (cn *Conn) CompareAndSwapUsed(old, new bool) bool { + if old == new { + // No change needed + currentState := cn.stateMachine.GetState() + currentUsed := (currentState == StateInUse) + return currentUsed == old + } + + if !old && new { + // Acquiring: IDLE → IN_USE + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition(validFromCreatedOrIdle, StateInUse) + return err == nil + } else { + // Releasing: IN_USE → IDLE + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition(validFromInUse, StateIdle) + return err == nil + } +} + +// IsUsed returns true if the connection is currently in use (lock-free). +// +// Deprecated: Use GetStateMachine().GetState() == StateInUse directly for better clarity. +// This method is kept for backwards compatibility. +// +// A connection is "used" when it has been retrieved from the pool and is +// actively processing a command. Background operations (like re-auth) should +// wait until the connection is not used before executing commands. +func (cn *Conn) IsUsed() bool { + return cn.stateMachine.GetState() == StateInUse +} + +// SetUsed sets the used flag for the connection (lock-free). +// +// This should be called when returning a connection to the pool (set to false) +// or when a single-connection pool retrieves its connection (set to true). +// +// Prefer CompareAndSwapUsed() when acquiring from a multi-connection pool to +// avoid race conditions. +// Deprecated: Use GetStateMachine().Transition() directly for better state management. +func (cn *Conn) SetUsed(val bool) { + if val { + cn.stateMachine.Transition(StateInUse) + } else { + cn.stateMachine.Transition(StateIdle) + } +} + +// getNetConn returns the current network connection using atomic load (lock-free). +// This is the fast path for accessing netConn without mutex overhead. +func (cn *Conn) getNetConn() net.Conn { + if v := cn.netConnAtomic.Load(); v != nil { + if wrapper, ok := v.(*atomicNetConn); ok { + return wrapper.conn + } + } + return nil +} + +// setNetConn stores the network connection atomically (lock-free). +// This is used for the fast path of connection replacement. 
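+//
+// Design note (editor's addition): atomic.Value panics if successive Store
+// calls use different concrete types, so the net.Conn interface value is
+// wrapped in *atomicNetConn to guarantee a single consistent concrete type.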
+func (cn *Conn) setNetConn(netConn net.Conn) { + cn.netConnAtomic.Store(&atomicNetConn{conn: netConn}) +} + +// Handoff state management - atomic access to handoff metadata + +// ShouldHandoff returns true if connection needs handoff (lock-free). +func (cn *Conn) ShouldHandoff() bool { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).ShouldHandoff + } + return false +} + +// GetHandoffEndpoint returns the new endpoint for handoff (lock-free). +func (cn *Conn) GetHandoffEndpoint() string { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).Endpoint + } + return "" +} + +// GetMovingSeqID returns the sequence ID from the MOVING notification (lock-free). +func (cn *Conn) GetMovingSeqID() int64 { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).SeqID + } + return 0 +} + +// GetHandoffInfo returns all handoff information atomically (lock-free). +// This method prevents race conditions by returning all handoff state in a single atomic operation. +// Returns (shouldHandoff, endpoint, seqID). +func (cn *Conn) GetHandoffInfo() (bool, string, int64) { + if v := cn.handoffStateAtomic.Load(); v != nil { + state := v.(*HandoffState) + return state.ShouldHandoff, state.Endpoint, state.SeqID + } + return false, "", 0 +} + +// HandoffRetries returns the current handoff retry count (lock-free). +func (cn *Conn) HandoffRetries() int { + return int(cn.handoffRetriesAtomic.Load()) +} + +// IncrementAndGetHandoffRetries atomically increments and returns handoff retries (lock-free). +func (cn *Conn) IncrementAndGetHandoffRetries(n int) int { + return int(cn.handoffRetriesAtomic.Add(uint32(n))) +} + +// IsPooled returns true if the connection is managed by a pool and will be pooled on Put. +func (cn *Conn) IsPooled() bool { + return cn.pooled +} + +// IsPubSub returns true if the connection is used for PubSub. +func (cn *Conn) IsPubSub() bool { + return cn.pubsub +} + +// SetRelaxedTimeout sets relaxed timeouts for this connection during maintenanceNotifications upgrades. +// These timeouts will be used for all subsequent commands until the deadline expires. +// Uses atomic operations for lock-free access. +func (cn *Conn) SetRelaxedTimeout(readTimeout, writeTimeout time.Duration) { + cn.relaxedCounter.Add(1) + cn.relaxedReadTimeoutNs.Store(int64(readTimeout)) + cn.relaxedWriteTimeoutNs.Store(int64(writeTimeout)) +} + +// SetRelaxedTimeoutWithDeadline sets relaxed timeouts with an expiration deadline. +// After the deadline, timeouts automatically revert to normal values. +// Uses atomic operations for lock-free access. +func (cn *Conn) SetRelaxedTimeoutWithDeadline(readTimeout, writeTimeout time.Duration, deadline time.Time) { + cn.SetRelaxedTimeout(readTimeout, writeTimeout) + cn.relaxedDeadlineNs.Store(deadline.UnixNano()) +} + +// ClearRelaxedTimeout removes relaxed timeouts, returning to normal timeout behavior. +// Uses atomic operations for lock-free access. 
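+//
+// A minimal usage sketch (assumed pattern, not taken from this diff): each
+// SetRelaxedTimeout call should be balanced by one ClearRelaxedTimeout call,
+// since an internal counter tracks nested setters:
+//
+//	cn.SetRelaxedTimeout(10*time.Second, 10*time.Second)
+//	defer cn.ClearRelaxedTimeout() // timeouts revert once the counter drops to 0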
+func (cn *Conn) ClearRelaxedTimeout() { + // Atomically decrement counter and check if we should clear + newCount := cn.relaxedCounter.Add(-1) + deadlineNs := cn.relaxedDeadlineNs.Load() + if newCount <= 0 && (deadlineNs == 0 || time.Now().UnixNano() >= deadlineNs) { + // Use atomic load to get current value for CAS to avoid stale value race + current := cn.relaxedCounter.Load() + if current <= 0 && cn.relaxedCounter.CompareAndSwap(current, 0) { + cn.clearRelaxedTimeout() + } + } +} + +func (cn *Conn) clearRelaxedTimeout() { + cn.relaxedReadTimeoutNs.Store(0) + cn.relaxedWriteTimeoutNs.Store(0) + cn.relaxedDeadlineNs.Store(0) + cn.relaxedCounter.Store(0) +} + +// HasRelaxedTimeout returns true if relaxed timeouts are currently active on this connection. +// This checks both the timeout values and the deadline (if set). +// Uses atomic operations for lock-free access. +func (cn *Conn) HasRelaxedTimeout() bool { + // Fast path: no relaxed timeouts are set + if cn.relaxedCounter.Load() <= 0 { + return false + } + + readTimeoutNs := cn.relaxedReadTimeoutNs.Load() + writeTimeoutNs := cn.relaxedWriteTimeoutNs.Load() + + // If no relaxed timeouts are set, return false + if readTimeoutNs <= 0 && writeTimeoutNs <= 0 { + return false + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, relaxed timeouts are active + if deadlineNs == 0 { + return true + } + + // If deadline is set, check if it's still in the future + return time.Now().UnixNano() < deadlineNs +} + +// getEffectiveReadTimeout returns the timeout to use for read operations. +// If relaxed timeout is set and not expired, it takes precedence over the provided timeout. +// This method automatically clears expired relaxed timeouts using atomic operations. +func (cn *Conn) getEffectiveReadTimeout(normalTimeout time.Duration) time.Duration { + readTimeoutNs := cn.relaxedReadTimeoutNs.Load() + + // Fast path: no relaxed timeout set + if readTimeoutNs <= 0 { + return normalTimeout + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, use relaxed timeout + if deadlineNs == 0 { + return time.Duration(readTimeoutNs) + } + + // Use cached time to avoid expensive syscall (max 50ms staleness is acceptable for timeout checks) + nowNs := getCachedTimeNs() + // Check if deadline has passed + if nowNs < deadlineNs { + // Deadline is in the future, use relaxed timeout + return time.Duration(readTimeoutNs) + } else { + // Deadline has passed, clear relaxed timeouts atomically and use normal timeout + newCount := cn.relaxedCounter.Add(-1) + if newCount <= 0 { + internal.Logger.Printf(context.Background(), logs.UnrelaxedTimeoutAfterDeadline(cn.GetID())) + cn.clearRelaxedTimeout() + } + return normalTimeout + } +} + +// getEffectiveWriteTimeout returns the timeout to use for write operations. +// If relaxed timeout is set and not expired, it takes precedence over the provided timeout. +// This method automatically clears expired relaxed timeouts using atomic operations. 
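+//
+// Worked example (hypothetical values): with a relaxed write timeout of 10s
+// and an unexpired deadline, getEffectiveWriteTimeout(3*time.Second) returns
+// 10s; once the deadline passes, the relaxed state is cleared and the normal
+// 3s timeout is returned again.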
+func (cn *Conn) getEffectiveWriteTimeout(normalTimeout time.Duration) time.Duration { + writeTimeoutNs := cn.relaxedWriteTimeoutNs.Load() + + // Fast path: no relaxed timeout set + if writeTimeoutNs <= 0 { + return normalTimeout + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, use relaxed timeout + if deadlineNs == 0 { + return time.Duration(writeTimeoutNs) + } + + // Use cached time to avoid expensive syscall (max 50ms staleness is acceptable for timeout checks) + nowNs := getCachedTimeNs() + // Check if deadline has passed + if nowNs < deadlineNs { + // Deadline is in the future, use relaxed timeout + return time.Duration(writeTimeoutNs) + } else { + // Deadline has passed, clear relaxed timeouts atomically and use normal timeout + newCount := cn.relaxedCounter.Add(-1) + if newCount <= 0 { + internal.Logger.Printf(context.Background(), logs.UnrelaxedTimeoutAfterDeadline(cn.GetID())) + cn.clearRelaxedTimeout() + } + return normalTimeout + } +} + +func (cn *Conn) SetOnClose(fn func() error) { + cn.onClose = fn +} + +// SetInitConnFunc sets the connection initialization function to be called on reconnections. +func (cn *Conn) SetInitConnFunc(fn func(context.Context, *Conn) error) { + cn.initConnFunc = fn +} + +// ExecuteInitConn runs the stored connection initialization function if available. +func (cn *Conn) ExecuteInitConn(ctx context.Context) error { + if cn.initConnFunc != nil { + return cn.initConnFunc(ctx, cn) + } + return fmt.Errorf("redis: no initConnFunc set for conn[%d]", cn.GetID()) } func (cn *Conn) SetNetConn(netConn net.Conn) { - cn.netConn = netConn + // Store the new connection atomically first (lock-free) + cn.setNetConn(netConn) + // Protect reader reset operations to avoid data races + // Use write lock since we're modifying the reader state + cn.readerMu.Lock() cn.rd.Reset(netConn) + cn.readerMu.Unlock() + cn.bw.Reset(netConn) - cn.setSysConn() } -func (cn *Conn) setSysConn() { - cn.sysConn = nil - conn := cn.netConn - if conn == nil { - return +// GetNetConn safely returns the current network connection using atomic load (lock-free). +// This method is used by the pool for health checks and provides better performance. +func (cn *Conn) GetNetConn() net.Conn { + return cn.getNetConn() +} + +// SetNetConnAndInitConn replaces the underlying connection and executes the initialization. +// This method ensures only one initialization can happen at a time by using atomic state transitions. +// If another goroutine is currently initializing, this will wait for it to complete. +func (cn *Conn) SetNetConnAndInitConn(ctx context.Context, netConn net.Conn) error { + // Wait for and transition to INITIALIZING state - this prevents concurrent initializations + // Valid from states: CREATED (first init), IDLE (reconnect), UNUSABLE (handoff/reauth) + // If another goroutine is initializing, we'll wait for it to finish + // if the context has a deadline, use that, otherwise use the connection read (relaxed) timeout + // which should be set during handoff. 
If it is not set, use a 5 second default + deadline, ok := ctx.Deadline() + if !ok { + deadline = time.Now().Add(cn.getEffectiveReadTimeout(5 * time.Second)) + } + waitCtx, cancel := context.WithDeadline(ctx, deadline) + defer cancel() + // Use predefined slice to avoid allocation + finalState, err := cn.stateMachine.AwaitAndTransition( + waitCtx, + validFromCreatedIdleOrUnusable, + StateInitializing, + ) + if err != nil { + return fmt.Errorf("cannot initialize connection from state %s: %w", finalState, err) + } + + // Replace the underlying connection + cn.SetNetConn(netConn) + + // Execute initialization + // NOTE: ExecuteInitConn (via baseClient.initConn) will transition to IDLE on success + // or CLOSED on failure. We don't need to do it here. + // NOTE: initConn returns the conn in IDLE state + initErr := cn.ExecuteInitConn(ctx) + if initErr != nil { + // ExecuteInitConn already transitioned to CLOSED, just return the error + return initErr } - if sysConn, ok := conn.(syscall.Conn); ok { - cn.sysConn = sysConn + // ExecuteInitConn already transitioned to IDLE + return nil +} + +// MarkForHandoff marks the connection for handoff due to MOVING notification. +// Returns an error if the connection is already marked for handoff. +// Note: This only sets metadata - the connection state is not changed until OnPut. +// This allows the current user to finish using the connection before handoff. +func (cn *Conn) MarkForHandoff(newEndpoint string, seqID int64) error { + // Check if already marked for handoff + if cn.ShouldHandoff() { + return errAlreadyMarkedForHandoff } + + // Set handoff metadata atomically + cn.handoffStateAtomic.Store(&HandoffState{ + ShouldHandoff: true, + Endpoint: newEndpoint, + SeqID: seqID, + }) + return nil +} + +// MarkQueuedForHandoff marks the connection as queued for handoff processing. +// This makes the connection unusable until handoff completes. +// This is called from OnPut hook, where the connection is typically in IN_USE state. +// The pool will preserve the UNUSABLE state and not overwrite it with IDLE.
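+//
+// Hypothetical flow (queue name assumed for illustration): a pool OnPut hook
+// could queue the handoff once the owner returns the connection:
+//
+//	if conn.ShouldHandoff() {
+//		if err := conn.MarkQueuedForHandoff(); err == nil {
+//			handoffQueue <- conn // picked up by a background worker
+//		}
+//	}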
+func (cn *Conn) MarkQueuedForHandoff() error { + // Get current handoff state + currentState := cn.handoffStateAtomic.Load() + if currentState == nil { + return errNotMarkedForHandoff + } + + state := currentState.(*HandoffState) + if !state.ShouldHandoff { + return errNotMarkedForHandoff + } + + // Create new state with ShouldHandoff=false but preserve endpoint and seqID + // This prevents the connection from being queued multiple times while still + // allowing the worker to access the handoff metadata + newState := &HandoffState{ + ShouldHandoff: false, + Endpoint: state.Endpoint, // Preserve endpoint for handoff processing + SeqID: state.SeqID, // Preserve seqID for handoff processing + } + + // Atomic compare-and-swap to update state + if !cn.handoffStateAtomic.CompareAndSwap(currentState, newState) { + // State changed between load and CAS - retry or return error + return errHandoffStateChanged + } + + // Transition to UNUSABLE from IN_USE (normal flow), IDLE (edge cases), or CREATED (tests/uninitialized) + // The connection is typically in IN_USE state when OnPut is called (normal Put flow) + // But in some edge cases or tests, it might be in IDLE or CREATED state + // The pool will detect this state change and preserve it (not overwrite with IDLE) + // Use predefined slice to avoid allocation + finalState, err := cn.stateMachine.TryTransition(validFromCreatedInUseOrIdle, StateUnusable) + if err != nil { + // Check if already in UNUSABLE state (race condition or retry) + // ShouldHandoff should be false now, but check just in case + if finalState == StateUnusable && !cn.ShouldHandoff() { + // Already unusable - this is fine, keep the new handoff state + return nil + } + // Restore the original state if transition fails for other reasons + cn.handoffStateAtomic.Store(currentState) + return fmt.Errorf("failed to mark connection as unusable: %w", err) + } + return nil +} + +// GetID returns the unique identifier for this connection. +func (cn *Conn) GetID() uint64 { + return cn.id +} + +// GetStateMachine returns the connection's state machine for advanced state management. +// This is primarily used by internal packages like maintnotifications for handoff processing. +func (cn *Conn) GetStateMachine() *ConnStateMachine { + return cn.stateMachine +} + +// TryAcquire attempts to acquire the connection for use. +// This is an optimized inline method for the hot path (Get operation). +// +// It tries to transition from IDLE -> IN_USE or CREATED -> CREATED. +// Returns true if the connection was successfully acquired, false otherwise. +// The CREATED->CREATED is done so we can keep the state correct for later +// initialization of the connection in initConn. +// +// Performance: This is faster than calling GetStateMachine() + TryTransitionFast() +// +// NOTE: We directly access cn.stateMachine.state here instead of using the state machine's +// methods. This breaks encapsulation but is necessary for performance. +// The IDLE->IN_USE and CREATED->CREATED transitions don't need +// waiter notification, and benchmarks show 1-3% improvement. If the state machine ever +// needs to notify waiters on these transitions, update this to use TryTransitionFast(). +func (cn *Conn) TryAcquire() bool { + // The || operator short-circuits, so only 1 CAS in the common case + return cn.stateMachine.state.CompareAndSwap(uint32(StateIdle), uint32(StateInUse)) || + cn.stateMachine.state.CompareAndSwap(uint32(StateCreated), uint32(StateCreated)) +} + +// Release releases the connection back to the pool. 
+// This is an optimized inline method for the hot path (Put operation). +// +// It tries to transition from IN_USE -> IDLE. +// Returns true if the connection was successfully released, false otherwise. +// +// Performance: This is faster than calling GetStateMachine() + TryTransitionFast(). +// +// NOTE: We directly access cn.stateMachine.state here instead of using the state machine's +// methods. This breaks encapsulation but is necessary for performance. +// If the state machine ever needs to notify waiters +// on this transition, update this to use TryTransitionFast(). +func (cn *Conn) Release() bool { + // Inline the hot path - single CAS operation + return cn.stateMachine.state.CompareAndSwap(uint32(StateInUse), uint32(StateIdle)) +} + +// ClearHandoffState clears the handoff state after successful handoff. +// Makes the connection usable again. +func (cn *Conn) ClearHandoffState() { + // Clear handoff metadata + cn.handoffStateAtomic.Store(&HandoffState{ + ShouldHandoff: false, + Endpoint: "", + SeqID: 0, + }) + + // Reset retry counter + cn.handoffRetriesAtomic.Store(0) + + // Mark connection as usable again + // Use state machine directly instead of deprecated SetUsable + // probably done by initConn + cn.stateMachine.Transition(StateIdle) +} + +// HasBufferedData safely checks if the connection has buffered data. +// This method is used to avoid data races when checking for push notifications. +func (cn *Conn) HasBufferedData() bool { + // Use read lock for concurrent access to reader state + cn.readerMu.RLock() + defer cn.readerMu.RUnlock() + return cn.rd.Buffered() > 0 +} + +// PeekReplyTypeSafe safely peeks at the reply type. +// This method is used to avoid data races when checking for push notifications. +func (cn *Conn) PeekReplyTypeSafe() (byte, error) { + // Use read lock for concurrent access to reader state + cn.readerMu.RLock() + defer cn.readerMu.RUnlock() + + if cn.rd.Buffered() <= 0 { + return 0, fmt.Errorf("redis: can't peek reply type, no data available") + } + return cn.rd.PeekReplyType() } func (cn *Conn) Write(b []byte) (int, error) { - return cn.netConn.Write(b) + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.Write(b) + } + return 0, net.ErrClosed } func (cn *Conn) RemoteAddr() net.Addr { - if cn.netConn != nil { - return cn.netConn.RemoteAddr() + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.RemoteAddr() } return nil } @@ -85,7 +806,16 @@ func (cn *Conn) WithReader( ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error, ) error { if timeout >= 0 { - if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil { + // Use relaxed timeout if set, otherwise use provided timeout + effectiveTimeout := cn.getEffectiveReadTimeout(timeout) + + // Get the connection directly from atomic storage + netConn := cn.getNetConn() + if netConn == nil { + return errConnectionNotAvailable + } + + if err := netConn.SetReadDeadline(cn.deadline(ctx, effectiveTimeout)); err != nil { return err } } @@ -96,13 +826,25 @@ func (cn *Conn) WithWriter( ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error, ) error { if timeout >= 0 { - if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil { - return err + // Use relaxed timeout if set, otherwise use provided timeout + effectiveTimeout := cn.getEffectiveWriteTimeout(timeout) + + // Set write deadline on the 
connection + if netConn := cn.getNetConn(); netConn != nil { + if err := netConn.SetWriteDeadline(cn.deadline(ctx, effectiveTimeout)); err != nil { + return err + } + } else { + // Connection is not available - return preallocated error + return errConnNotAvailableForWrite } } + // Reset the buffered writer if needed, should not happen if cn.bw.Buffered() > 0 { - cn.bw.Reset(cn.netConn) + if netConn := cn.getNetConn(); netConn != nil { + cn.bw.Reset(netConn) + } } if err := fn(cn.wr); err != nil { @@ -112,13 +854,47 @@ func (cn *Conn) WithWriter( return cn.bw.Flush() } +func (cn *Conn) IsClosed() bool { + return cn.closed.Load() || cn.stateMachine.GetState() == StateClosed +} + func (cn *Conn) Close() error { - return cn.netConn.Close() + cn.closed.Store(true) + + // Transition to CLOSED state + cn.stateMachine.Transition(StateClosed) + + if cn.onClose != nil { + // ignore error + _ = cn.onClose() + } + + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.Close() + } + return nil +} + +// MaybeHasData tries to peek at the next byte in the socket without consuming it +// This is used to check if there are push notifications available +// Important: This will work on Linux, but not on Windows +func (cn *Conn) MaybeHasData() bool { + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return maybeHasData(netConn) + } + return false } +// deadline computes the effective deadline time based on context and timeout. +// It updates the usedAt timestamp to now. +// Uses cached time to avoid expensive syscall (max 50ms staleness is acceptable for deadline calculation). func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time { - tm := time.Now() - cn.SetUsedAt(tm) + // Use cached time for deadline calculation (called 2x per command: read + write) + nowNs := getCachedTimeNs() + cn.SetUsedAtNs(nowNs) + tm := time.Unix(0, nowNs) if timeout > 0 { tm = tm.Add(timeout) diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go index f28833850..9e83dd833 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go @@ -5,12 +5,24 @@ package pool import ( "errors" "io" + "net" "syscall" + "time" ) var errUnexpectedRead = errors.New("unexpected read from socket") -func connCheck(sysConn syscall.Conn) error { +// connCheck checks if the connection is still alive and if there is data in the socket +// it will try to peek at the next byte without consuming it since we may want to work with it +// later on (e.g. push notifications) +func connCheck(conn net.Conn) error { + // Reset previous timeout. 
+ _ = conn.SetDeadline(time.Time{}) + + sysConn, ok := conn.(syscall.Conn) + if !ok { + return nil + } rawConn, err := sysConn.SyscallConn() if err != nil { return err @@ -20,7 +32,9 @@ func connCheck(sysConn syscall.Conn) error { if err := rawConn.Read(func(fd uintptr) bool { var buf [1]byte - n, err := syscall.Read(int(fd), buf[:]) + // Use MSG_PEEK to peek at data without consuming it + n, _, err := syscall.Recvfrom(int(fd), buf[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + switch { case n == 0 && err == nil: sysErr = io.EOF @@ -38,3 +52,8 @@ func connCheck(sysConn syscall.Conn) error { return sysErr } + +// maybeHasData checks if there is data in the socket without consuming it +func maybeHasData(conn net.Conn) bool { + return connCheck(conn) == errUnexpectedRead +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go index 2d270cf56..f971d94c4 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go @@ -2,8 +2,19 @@ package pool -import "syscall" +import ( + "errors" + "net" +) -func connCheck(_ syscall.Conn) error { +// errUnexpectedRead is a placeholder error variable for non-unix build constraints +var errUnexpectedRead = errors.New("unexpected read from socket") + +func connCheck(_ net.Conn) error { return nil } + +// Since we can't check for data on the socket, we just assume there is some +func maybeHasData(_ net.Conn) bool { + return true +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go new file mode 100644 index 000000000..2050a742b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go @@ -0,0 +1,343 @@ +package pool + +import ( + "container/list" + "context" + "errors" + "fmt" + "sync" + "sync/atomic" +) + +// ConnState represents the connection state in the state machine. +// States are designed to be lightweight and fast to check. +// +// State Transitions: +// CREATED → INITIALIZING → IDLE ⇄ IN_USE +// ↓ +// UNUSABLE (handoff/reauth) +// ↓ +// IDLE/CLOSED +type ConnState uint32 + +const ( + // StateCreated - Connection just created, not yet initialized + StateCreated ConnState = iota + + // StateInitializing - Connection initialization in progress + StateInitializing + + // StateIdle - Connection initialized and idle in pool, ready to be acquired + StateIdle + + // StateInUse - Connection actively processing a command (retrieved from pool) + StateInUse + + // StateUnusable - Connection temporarily unusable due to background operation + // (handoff, reauth, etc.). Cannot be acquired from pool.
+ StateUnusable + + // StateClosed - Connection closed + StateClosed +) + +// Predefined state slices to avoid allocations in hot paths +var ( + validFromInUse = []ConnState{StateInUse} + validFromCreatedOrIdle = []ConnState{StateCreated, StateIdle} + validFromCreatedInUseOrIdle = []ConnState{StateCreated, StateInUse, StateIdle} + // For AwaitAndTransition calls + validFromCreatedIdleOrUnusable = []ConnState{StateCreated, StateIdle, StateUnusable} + validFromIdle = []ConnState{StateIdle} + // For CompareAndSwapUsable + validFromInitializingOrUnusable = []ConnState{StateInitializing, StateUnusable} +) + +// Accessor functions for predefined slices to avoid allocations in external packages +// These return the same slice instance, so they're zero-allocation + +// ValidFromIdle returns a predefined slice containing only StateIdle. +// Use this to avoid allocations when calling AwaitAndTransition or TryTransition. +func ValidFromIdle() []ConnState { + return validFromIdle +} + +// ValidFromCreatedIdleOrUnusable returns a predefined slice for initialization transitions. +// Use this to avoid allocations when calling AwaitAndTransition or TryTransition. +func ValidFromCreatedIdleOrUnusable() []ConnState { + return validFromCreatedIdleOrUnusable +} + +// String returns a human-readable string representation of the state. +func (s ConnState) String() string { + switch s { + case StateCreated: + return "CREATED" + case StateInitializing: + return "INITIALIZING" + case StateIdle: + return "IDLE" + case StateInUse: + return "IN_USE" + case StateUnusable: + return "UNUSABLE" + case StateClosed: + return "CLOSED" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +var ( + // ErrInvalidStateTransition is returned when a state transition is not allowed + ErrInvalidStateTransition = errors.New("invalid state transition") + + // ErrStateMachineClosed is returned when operating on a closed state machine + ErrStateMachineClosed = errors.New("state machine is closed") + + // ErrTimeout is returned when a state transition times out + ErrTimeout = errors.New("state transition timeout") +) + +// waiter represents a goroutine waiting for a state transition. +// Designed for minimal allocations and fast processing. +type waiter struct { + validStates map[ConnState]struct{} // States we're waiting for + targetState ConnState // State to transition to + done chan error // Signaled when transition completes or times out +} + +// ConnStateMachine manages connection state transitions with FIFO waiting queue. +// Optimized for: +// - Lock-free reads (hot path) +// - Minimal allocations +// - Fast state transitions +// - FIFO fairness for waiters +// Note: Handoff metadata (endpoint, seqID, retries) is managed separately in the Conn struct. +type ConnStateMachine struct { + // Current state - atomic for lock-free reads + state atomic.Uint32 + + // FIFO queue for waiters - only locked during waiter add/remove/notify + mu sync.Mutex + waiters *list.List // List of *waiter + waiterCount atomic.Int32 // Fast lock-free check for waiters (avoids mutex in hot path) +} + +// NewConnStateMachine creates a new connection state machine. +// Initial state is StateCreated. +func NewConnStateMachine() *ConnStateMachine { + sm := &ConnStateMachine{ + waiters: list.New(), + } + sm.state.Store(uint32(StateCreated)) + return sm +} + +// GetState returns the current state (lock-free read). +// This is the hot path - optimized for zero allocations and minimal overhead. 
+// Note: Zero allocations applies to state reads; converting the returned state to a string +// (via String()) may allocate if the state is unknown. +func (sm *ConnStateMachine) GetState() ConnState { + return ConnState(sm.state.Load()) +} + +// TryTransitionFast is an optimized version for the hot path (Get/Put operations). +// It only handles simple state transitions without waiter notification. +// This is safe because: +// 1. Get/Put don't need to wait for state changes +// 2. Background operations (handoff/reauth) use UNUSABLE state, which this won't match +// 3. If a background operation is in progress (state is UNUSABLE), this fails fast +// +// Returns true if transition succeeded, false otherwise. +// Use this for performance-critical paths where you don't need error details. +// +// Performance: Single CAS operation - as fast as the old atomic bool! +// For multiple from states, use: sm.TryTransitionFast(State1, Target) || sm.TryTransitionFast(State2, Target) +// The || operator short-circuits, so only 1 CAS is executed in the common case. +func (sm *ConnStateMachine) TryTransitionFast(fromState, targetState ConnState) bool { + return sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) +} + +// TryTransition attempts an immediate state transition without waiting. +// Returns the current state after the transition attempt and an error if the transition failed. +// The returned state is the CURRENT state (after the attempt), not the previous state. +// This is faster than AwaitAndTransition when you don't need to wait. +// Uses compare-and-swap to atomically transition, preventing concurrent transitions. +// This method does NOT wait - it fails immediately if the transition cannot be performed. +// +// Performance: Zero allocations on success path (hot path). +func (sm *ConnStateMachine) TryTransition(validFromStates []ConnState, targetState ConnState) (ConnState, error) { + // Try each valid from state with CAS + // This ensures only ONE goroutine can successfully transition at a time + for _, fromState := range validFromStates { + // Try to atomically swap from fromState to targetState + // If successful, we won the race and can proceed + if sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) { + // Success! We transitioned atomically + // Hot path optimization: only check for waiters if transition succeeded + // This avoids atomic load on every Get/Put when no waiters exist + if sm.waiterCount.Load() > 0 { + sm.notifyWaiters() + } + return targetState, nil + } + } + + // All CAS attempts failed - state is not valid for this transition + // Return the current state so caller can decide what to do + // Note: This error path allocates, but it's the exceptional case + currentState := sm.GetState() + return currentState, fmt.Errorf("%w: cannot transition from %s to %s (valid from: %v)", + ErrInvalidStateTransition, currentState, targetState, validFromStates) +} + +// Transition unconditionally transitions to the target state. +// Use with caution - prefer AwaitAndTransition or TryTransition for safety. +// This is useful for error paths or when you know the transition is valid. +func (sm *ConnStateMachine) Transition(targetState ConnState) { + sm.state.Store(uint32(targetState)) + sm.notifyWaiters() +} + +// AwaitAndTransition waits for the connection to reach one of the valid states, +// then atomically transitions to the target state. +// Returns the current state after the transition attempt and an error if the operation failed. 
+// The returned state is the CURRENT state (after the attempt), not the previous state. +// Returns error if timeout expires or context is cancelled. +// +// This method implements FIFO fairness - the first caller to wait gets priority +// when the state becomes available. +// +// Performance notes: +// - If already in a valid state, this is very fast (no allocation, no waiting) +// - If waiting is required, allocates one waiter struct and one channel +func (sm *ConnStateMachine) AwaitAndTransition( + ctx context.Context, + validFromStates []ConnState, + targetState ConnState, +) (ConnState, error) { + // Fast path: try immediate transition with CAS to prevent race conditions + // BUT: only if there are no waiters in the queue (to maintain FIFO ordering) + if sm.waiterCount.Load() == 0 { + for _, fromState := range validFromStates { + // Check if we're already in target state + if fromState == targetState && sm.GetState() == targetState { + return targetState, nil + } + + // Try to atomically swap from fromState to targetState + if sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) { + // Success! We transitioned atomically + sm.notifyWaiters() + return targetState, nil + } + } + } + + // Fast path failed - check if we should wait or fail + currentState := sm.GetState() + + // Check if closed + if currentState == StateClosed { + return currentState, ErrStateMachineClosed + } + + // Slow path: need to wait for state change + // Create waiter with valid states map for fast lookup + validStatesMap := make(map[ConnState]struct{}, len(validFromStates)) + for _, s := range validFromStates { + validStatesMap[s] = struct{}{} + } + + w := &waiter{ + validStates: validStatesMap, + targetState: targetState, + done: make(chan error, 1), // Buffered to avoid goroutine leak + } + + // Add to FIFO queue + sm.mu.Lock() + elem := sm.waiters.PushBack(w) + sm.waiterCount.Add(1) + sm.mu.Unlock() + + // Wait for state change or timeout + select { + case <-ctx.Done(): + // Timeout or cancellation - remove from queue + sm.mu.Lock() + sm.waiters.Remove(elem) + sm.waiterCount.Add(-1) + sm.mu.Unlock() + return sm.GetState(), ctx.Err() + case err := <-w.done: + // Transition completed (or failed) + // Note: waiterCount is decremented either in notifyWaiters (when the waiter is notified and removed) + // or here (on timeout/cancellation). + return sm.GetState(), err + } +} + +// notifyWaiters checks if any waiters can proceed and notifies them in FIFO order. +// This is called after every state transition. 
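+//
+// For context, a hedged sketch of the waiting API these notifications serve:
+// a background operation can wait for an idle connection and claim it:
+//
+//	if _, err := sm.AwaitAndTransition(ctx, ValidFromIdle(), StateUnusable); err != nil {
+//		return err // timed out, cancelled, or state machine closed
+//	}
+//	// connection is now UNUSABLE; do the background work, then release it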
+func (sm *ConnStateMachine) notifyWaiters() {
+	// Fast path: check atomic counter without acquiring lock
+	// This eliminates mutex overhead in the common case (no waiters)
+	if sm.waiterCount.Load() == 0 {
+		return
+	}
+
+	sm.mu.Lock()
+	defer sm.mu.Unlock()
+
+	// Double-check after acquiring lock (waiters might have been processed)
+	if sm.waiters.Len() == 0 {
+		return
+	}
+
+	// Process waiters in FIFO order until no more can be processed
+	// We loop instead of recursing to avoid stack overflow and mutex issues
+	for {
+		processed := false
+
+		// Find the first waiter that can proceed
+		for elem := sm.waiters.Front(); elem != nil; elem = elem.Next() {
+			w := elem.Value.(*waiter)
+
+			// Read current state inside the loop to get the latest value
+			currentState := sm.GetState()
+
+			// Check if current state is valid for this waiter
+			if _, valid := w.validStates[currentState]; valid {
+				// Remove from queue first
+				sm.waiters.Remove(elem)
+				sm.waiterCount.Add(-1)
+
+				// Use CAS to ensure state hasn't changed since we checked
+				// This prevents a race condition where another goroutine changes state
+				// between our check and our transition
+				if sm.state.CompareAndSwap(uint32(currentState), uint32(w.targetState)) {
+					// Successfully transitioned - notify waiter
+					w.done <- nil
+					processed = true
+					break
+				} else {
+					// State changed - re-add waiter to front of queue to maintain FIFO ordering
+					// This waiter was first in line and should retain priority
+					sm.waiters.PushFront(w)
+					sm.waiterCount.Add(1)
+					// Continue to next iteration to re-read state
+					processed = true
+					break
+				}
+			}
+		}
+
+		// If we didn't process any waiter, we're done
+		if !processed {
+			break
+		}
+	}
+}
+
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go b/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go
new file mode 100644
index 000000000..a26e1976d
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go
@@ -0,0 +1,165 @@
+package pool
+
+import (
+	"context"
+	"sync"
+)
+
+// PoolHook defines the interface for connection lifecycle hooks.
+type PoolHook interface {
+	// OnGet is called when a connection is retrieved from the pool.
+	// It can modify the connection or reject it.
+	// If accept is false, the connection is rejected and returned to the pool;
+	// if an error is returned, the connection is removed from the pool.
+	// The isNewConn flag indicates whether this is a newly dialed connection
+	// rather than an idle one from the pool, which is useful for gathering
+	// metrics on the pool hit/miss ratio.
+	OnGet(ctx context.Context, conn *Conn, isNewConn bool) (accept bool, err error)
+
+	// OnPut is called when a connection is returned to the pool.
+	// It returns whether the connection should be pooled and whether it should be removed.
+	OnPut(ctx context.Context, conn *Conn) (shouldPool bool, shouldRemove bool, err error)
+
+	// OnRemove is called when a connection is removed from the pool.
+	// This happens when:
+	// - Connection fails health check
+	// - Connection exceeds max lifetime
+	// - Pool is being closed
+	// - Connection encounters an error
+	// Implementations should clean up any per-connection state.
+	// The reason parameter indicates why the connection was removed.
+	OnRemove(ctx context.Context, conn *Conn, reason error)
+}
+
+// PoolHookManager manages multiple pool hooks.
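+//
+// A minimal custom hook might look like this (illustrative sketch; only the
+// method set is dictated by the PoolHook interface above):
+//
+//	type metricsHook struct{ hits, misses atomic.Uint64 }
+//
+//	func (h *metricsHook) OnGet(ctx context.Context, conn *Conn, isNewConn bool) (bool, error) {
+//		if isNewConn {
+//			h.misses.Add(1) // freshly dialed connection
+//		} else {
+//			h.hits.Add(1) // reused idle connection
+//		}
+//		return true, nil // accept every connection
+//	}
+//
+//	func (h *metricsHook) OnPut(ctx context.Context, conn *Conn) (bool, bool, error) {
+//		return true, false, nil // pool it, don't remove
+//	}
+//
+//	func (h *metricsHook) OnRemove(ctx context.Context, conn *Conn, reason error) {}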
+type PoolHookManager struct { + hooks []PoolHook + hooksMu sync.RWMutex +} + +// NewPoolHookManager creates a new pool hook manager. +func NewPoolHookManager() *PoolHookManager { + return &PoolHookManager{ + hooks: make([]PoolHook, 0), + } +} + +// AddHook adds a pool hook to the manager. +// Hooks are called in the order they were added. +func (phm *PoolHookManager) AddHook(hook PoolHook) { + phm.hooksMu.Lock() + defer phm.hooksMu.Unlock() + phm.hooks = append(phm.hooks, hook) +} + +// RemoveHook removes a pool hook from the manager. +func (phm *PoolHookManager) RemoveHook(hook PoolHook) { + phm.hooksMu.Lock() + defer phm.hooksMu.Unlock() + + for i, h := range phm.hooks { + if h == hook { + // Remove hook by swapping with last element and truncating + phm.hooks[i] = phm.hooks[len(phm.hooks)-1] + phm.hooks = phm.hooks[:len(phm.hooks)-1] + break + } + } +} + +// ProcessOnGet calls all OnGet hooks in order. +// If any hook returns an error, processing stops and the error is returned. +func (phm *PoolHookManager) ProcessOnGet(ctx context.Context, conn *Conn, isNewConn bool) (acceptConn bool, err error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + acceptConn, err := hook.OnGet(ctx, conn, isNewConn) + if err != nil { + return false, err + } + + if !acceptConn { + return false, nil + } + } + return true, nil +} + +// ProcessOnPut calls all OnPut hooks in order. +// The first hook that returns shouldRemove=true or shouldPool=false will stop processing. +func (phm *PoolHookManager) ProcessOnPut(ctx context.Context, conn *Conn) (shouldPool bool, shouldRemove bool, err error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + shouldPool = true // Default to pooling the connection + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + hookShouldPool, hookShouldRemove, hookErr := hook.OnPut(ctx, conn) + + if hookErr != nil { + return false, true, hookErr + } + + // If any hook says to remove or not pool, respect that decision + if hookShouldRemove { + return false, true, nil + } + + if !hookShouldPool { + shouldPool = false + } + } + + return shouldPool, false, nil +} + +// ProcessOnRemove calls all OnRemove hooks in order. +func (phm *PoolHookManager) ProcessOnRemove(ctx context.Context, conn *Conn, reason error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + hook.OnRemove(ctx, conn, reason) + } +} + +// GetHookCount returns the number of registered hooks (for testing). +func (phm *PoolHookManager) GetHookCount() int { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + return len(phm.hooks) +} + +// GetHooks returns a copy of all registered hooks. +func (phm *PoolHookManager) GetHooks() []PoolHook { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + + hooks := make([]PoolHook, len(phm.hooks)) + copy(hooks, phm.hooks) + return hooks +} + +// Clone creates a copy of the hook manager with the same hooks. +// This is used for lock-free atomic updates of the hook manager. 
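+//
+// Copy-on-write sketch (this mirrors how ConnPool.AddPoolHook in pool.go
+// swaps managers; hookPtr stands in for its atomic.Pointer[PoolHookManager]):
+//
+//	old := hookPtr.Load()
+//	updated := old.Clone()
+//	updated.AddHook(myHook)
+//	hookPtr.Store(updated) // readers keep seeing a consistent snapshot, lock-free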
+func (phm *PoolHookManager) Clone() *PoolHookManager { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + + newManager := &PoolHookManager{ + hooks: make([]PoolHook, len(phm.hooks)), + } + copy(newManager.hooks, phm.hooks) + return newManager +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go index 9b84993cc..d757d1f4f 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go @@ -9,6 +9,8 @@ import ( "time" "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" ) var ( @@ -21,25 +23,48 @@ var ( // ErrPoolTimeout timed out waiting to get a connection from the connection pool. ErrPoolTimeout = errors.New("redis: connection pool timeout") -) -var timers = sync.Pool{ - New: func() interface{} { - t := time.NewTimer(time.Hour) - t.Stop() - return t - }, -} + // ErrConnUnusableTimeout is returned when a connection is not usable and we timed out trying to mark it as unusable. + ErrConnUnusableTimeout = errors.New("redis: timed out trying to mark connection as unusable") + + // errHookRequestedRemoval is returned when a hook requests connection removal. + errHookRequestedRemoval = errors.New("hook requested removal") + + // errConnNotPooled is returned when trying to return a non-pooled connection to the pool. + errConnNotPooled = errors.New("connection not pooled") + + // popAttempts is the maximum number of attempts to find a usable connection + // when popping from the idle connection pool. This handles cases where connections + // are temporarily marked as unusable (e.g., during maintenanceNotifications upgrades or network issues). + // Value of 50 provides sufficient resilience without excessive overhead. + // This is capped by the idle connection count, so we won't loop excessively. + popAttempts = 50 + + // getAttempts is the maximum number of attempts to get a connection that passes + // hook validation (e.g., maintenanceNotifications upgrade hooks). This protects against race conditions + // where hooks might temporarily reject connections during cluster transitions. + // Value of 3 balances resilience with performance - most hook rejections resolve quickly. + getAttempts = 3 + + minTime = time.Unix(-2208988800, 0) // Jan 1, 1900 + maxTime = minTime.Add(1<<63 - 1) + noExpiration = maxTime +) // Stats contains pool state information and accumulated stats. 
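+// A consumer might snapshot these counters like so (illustrative sketch):
+//
+//	s := connPool.Stats()
+//	fmt.Printf("hits=%d misses=%d waited=%v\n",
+//		s.Hits, s.Misses, time.Duration(s.WaitDurationNs))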
 type Stats struct {
-	Hits     uint32 // number of times free connection was found in the pool
-	Misses   uint32 // number of times free connection was NOT found in the pool
-	Timeouts uint32 // number of times a wait timeout occurred
+	Hits           uint32 // number of times free connection was found in the pool
+	Misses         uint32 // number of times free connection was NOT found in the pool
+	Timeouts       uint32 // number of times a wait timeout occurred
+	WaitCount      uint32 // number of times the pool waited for a connection
+	Unusable       uint32 // number of times a connection was found to be unusable
+	WaitDurationNs int64  // total time spent waiting for a connection, in nanoseconds
 
 	TotalConns uint32 // number of total connections in the pool
 	IdleConns  uint32 // number of idle connections in the pool
 	StaleConns uint32 // number of stale connections removed from the pool
+
+	PubSubStats PubSubStats
 }
 
 type Pooler interface {
@@ -54,20 +79,46 @@ type Pooler interface {
 	IdleLen() int
 	Stats() *Stats
 
+	// Size returns the maximum pool size (capacity).
+	// This is used by the streaming credentials manager to size the re-auth worker pool.
+	Size() int
+
+	AddPoolHook(hook PoolHook)
+	RemovePoolHook(hook PoolHook)
+
+	// RemoveWithoutTurn removes a connection from the pool without freeing a turn.
+	// This should be used when removing a connection from a context that didn't acquire
+	// a turn via Get() (e.g., background workers, cleanup tasks).
+	// For normal removal after Get(), use Remove() instead.
+	RemoveWithoutTurn(context.Context, *Conn, error)
+
 	Close() error
 }
 
 type Options struct {
-	Dialer func(context.Context) (net.Conn, error)
-
-	PoolFIFO        bool
-	PoolSize        int
-	PoolTimeout     time.Duration
-	MinIdleConns    int
-	MaxIdleConns    int
-	MaxActiveConns  int
-	ConnMaxIdleTime time.Duration
-	ConnMaxLifetime time.Duration
+	Dialer          func(context.Context) (net.Conn, error)
+	ReadBufferSize  int
+	WriteBufferSize int
+
+	PoolFIFO                 bool
+	PoolSize                 int32
+	MaxConcurrentDials       int
+	DialTimeout              time.Duration
+	PoolTimeout              time.Duration
+	MinIdleConns             int32
+	MaxIdleConns             int32
+	MaxActiveConns           int32
+	ConnMaxIdleTime          time.Duration
+	ConnMaxLifetime          time.Duration
+	PushNotificationsEnabled bool
+
+	// DialerRetries is the maximum number of retry attempts when dialing fails.
+	// Default: 5
+	DialerRetries int
+
+	// DialerRetryTimeout is the backoff duration between retry attempts.
+ // Default: 100ms + DialerRetryTimeout time.Duration } type lastDialErrorWrap struct { @@ -80,71 +131,165 @@ type ConnPool struct { dialErrorsNum uint32 // atomic lastDialError atomic.Value - queue chan struct{} + queue chan struct{} + dialsInProgress chan struct{} + dialsQueue *wantConnQueue + // Fast semaphore for connection limiting with eventual fairness + // Uses fast path optimization to avoid timer allocation when tokens are available + semaphore *internal.FastSemaphore connsMu sync.Mutex - conns []*Conn + conns map[uint64]*Conn idleConns []*Conn - poolSize int - idleConnsLen int + poolSize atomic.Int32 + idleConnsLen atomic.Int32 + idleCheckInProgress atomic.Bool + idleCheckNeeded atomic.Bool - stats Stats + stats Stats + waitDurationNs atomic.Int64 _closed uint32 // atomic + + // Pool hooks manager for flexible connection processing + // Using atomic.Pointer for lock-free reads in hot paths (Get/Put) + hookManager atomic.Pointer[PoolHookManager] } var _ Pooler = (*ConnPool)(nil) func NewConnPool(opt *Options) *ConnPool { p := &ConnPool{ - cfg: opt, - - queue: make(chan struct{}, opt.PoolSize), - conns: make([]*Conn, 0, opt.PoolSize), - idleConns: make([]*Conn, 0, opt.PoolSize), + cfg: opt, + semaphore: internal.NewFastSemaphore(opt.PoolSize), + queue: make(chan struct{}, opt.PoolSize), + conns: make(map[uint64]*Conn), + dialsInProgress: make(chan struct{}, opt.MaxConcurrentDials), + dialsQueue: newWantConnQueue(), + idleConns: make([]*Conn, 0, opt.PoolSize), } - p.connsMu.Lock() - p.checkMinIdleConns() - p.connsMu.Unlock() + // Only create MinIdleConns if explicitly requested (> 0) + // This avoids creating connections during pool initialization for tests + if opt.MinIdleConns > 0 { + p.connsMu.Lock() + p.checkMinIdleConns() + p.connsMu.Unlock() + } return p } +// initializeHooks sets up the pool hooks system. +func (p *ConnPool) initializeHooks() { + manager := NewPoolHookManager() + p.hookManager.Store(manager) +} + +// AddPoolHook adds a pool hook to the pool. +func (p *ConnPool) AddPoolHook(hook PoolHook) { + // Lock-free read of current manager + manager := p.hookManager.Load() + if manager == nil { + p.initializeHooks() + manager = p.hookManager.Load() + } + + // Create new manager with added hook + newManager := manager.Clone() + newManager.AddHook(hook) + + // Atomically swap to new manager + p.hookManager.Store(newManager) +} + +// RemovePoolHook removes a pool hook from the pool. 
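+// Registration sketch (metricsHook as in the hooks.go example above):
+//
+//	hook := &metricsHook{}
+//	connPool.AddPoolHook(hook)
+//	defer connPool.RemovePoolHook(hook) // pass the identical value: removal compares interface identity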
+func (p *ConnPool) RemovePoolHook(hook PoolHook) { + manager := p.hookManager.Load() + if manager != nil { + // Create new manager with removed hook + newManager := manager.Clone() + newManager.RemoveHook(hook) + + // Atomically swap to new manager + p.hookManager.Store(newManager) + } +} + func (p *ConnPool) checkMinIdleConns() { + // If a check is already in progress, mark that we need another check and return + if !p.idleCheckInProgress.CompareAndSwap(false, true) { + p.idleCheckNeeded.Store(true) + return + } + if p.cfg.MinIdleConns == 0 { + p.idleCheckInProgress.Store(false) return } - for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns { - select { - case p.queue <- struct{}{}: - p.poolSize++ - p.idleConnsLen++ + // Keep checking until no more checks are needed + // This handles the case where multiple Remove() calls happen concurrently + for { + // Clear the "check needed" flag before we start + p.idleCheckNeeded.Store(false) + + // Only create idle connections if we haven't reached the total pool size limit + // MinIdleConns should be a subset of PoolSize, not additional connections + for p.poolSize.Load() < p.cfg.PoolSize && p.idleConnsLen.Load() < p.cfg.MinIdleConns { + // Try to acquire a semaphore token + if !p.semaphore.TryAcquire() { + // Semaphore is full, can't create more connections + p.idleCheckInProgress.Store(false) + return + } + + p.poolSize.Add(1) + p.idleConnsLen.Add(1) go func() { + defer func() { + if err := recover(); err != nil { + p.poolSize.Add(-1) + p.idleConnsLen.Add(-1) + + p.freeTurn() + internal.Logger.Printf(context.Background(), "addIdleConn panic: %+v", err) + } + }() + err := p.addIdleConn() if err != nil && err != ErrClosed { - p.connsMu.Lock() - p.poolSize-- - p.idleConnsLen-- - p.connsMu.Unlock() + p.poolSize.Add(-1) + p.idleConnsLen.Add(-1) } - p.freeTurn() }() - default: + } + + // If no one requested another check while we were working, we're done + if !p.idleCheckNeeded.Load() { + p.idleCheckInProgress.Store(false) return } + + // Otherwise, loop again to handle the new requests } } func (p *ConnPool) addIdleConn() error { - cn, err := p.dialConn(context.TODO(), true) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + defer cancel() + + cn, err := p.dialConn(ctx, true) if err != nil { return err } + // NOTE: Connection is in CREATED state and will be initialized by redis.go:initConn() + // when first acquired from the pool. Do NOT transition to IDLE here - that happens + // after initialization completes. + p.connsMu.Lock() defer p.connsMu.Unlock() @@ -154,11 +299,15 @@ func (p *ConnPool) addIdleConn() error { return ErrClosed } - p.conns = append(p.conns, cn) + p.conns[cn.GetID()] = cn p.idleConns = append(p.idleConns, cn) return nil } +// NewConn creates a new connection and returns it to the user. +// This will still obey MaxActiveConns but will not include it in the pool and won't increase the pool size. +// +// NOTE: If you directly get a connection from the pool, it won't be pooled and won't support maintnotifications upgrades. 
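+//
+// For reference, a pool wired up with the dial-retry knobs described above
+// might look like this (values are illustrative only):
+//
+//	pool := NewConnPool(&Options{
+//		Dialer: func(ctx context.Context) (net.Conn, error) {
+//			var d net.Dialer
+//			return d.DialContext(ctx, "tcp", "localhost:6379")
+//		},
+//		PoolSize:           10,
+//		MaxConcurrentDials: 4,
+//		DialTimeout:        5 * time.Second,
+//		PoolTimeout:        time.Second,
+//		DialerRetries:      5,                      // falls back to 5 when unset
+//		DialerRetryTimeout: 100 * time.Millisecond, // falls back to 100ms when unset
+//	})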
func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) { return p.newConn(ctx, false) } @@ -168,33 +317,45 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) { return nil, ErrClosed } - p.connsMu.Lock() - if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns { - p.connsMu.Unlock() + if p.cfg.MaxActiveConns > 0 && p.poolSize.Load() >= p.cfg.MaxActiveConns { return nil, ErrPoolExhausted } - p.connsMu.Unlock() - cn, err := p.dialConn(ctx, pooled) + dialCtx, cancel := context.WithTimeout(ctx, p.cfg.DialTimeout) + defer cancel() + cn, err := p.dialConn(dialCtx, pooled) if err != nil { return nil, err } - p.connsMu.Lock() - defer p.connsMu.Unlock() + // NOTE: Connection is in CREATED state and will be initialized by redis.go:initConn() + // when first used. Do NOT transition to IDLE here - that happens after initialization completes. + // The state machine flow is: CREATED → INITIALIZING (in initConn) → IDLE (after init success) - if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns { + if p.cfg.MaxActiveConns > 0 && p.poolSize.Load() > p.cfg.MaxActiveConns { _ = cn.Close() return nil, ErrPoolExhausted } - p.conns = append(p.conns, cn) + p.connsMu.Lock() + defer p.connsMu.Unlock() + if p.closed() { + _ = cn.Close() + return nil, ErrClosed + } + // Check if pool was closed while we were waiting for the lock + if p.conns == nil { + p.conns = make(map[uint64]*Conn) + } + p.conns[cn.GetID()] = cn + if pooled { // If pool is full remove the cn on next Put. - if p.poolSize >= p.cfg.PoolSize { + currentPoolSize := p.poolSize.Load() + if currentPoolSize >= p.cfg.PoolSize { cn.pooled = false } else { - p.poolSize++ + p.poolSize.Add(1) } } @@ -210,18 +371,58 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) { return nil, p.getLastDialError() } - netConn, err := p.cfg.Dialer(ctx) - if err != nil { - p.setLastDialError(err) - if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) { - go p.tryDial() + // Retry dialing with backoff + // the context timeout is already handled by the context passed in + // so we may never reach the max retries, higher values don't hurt + maxRetries := p.cfg.DialerRetries + if maxRetries <= 0 { + maxRetries = 5 // Default value + } + backoffDuration := p.cfg.DialerRetryTimeout + if backoffDuration <= 0 { + backoffDuration = 100 * time.Millisecond // Default value + } + + var lastErr error + shouldLoop := true + // when the timeout is reached, we should stop retrying + // but keep the lastErr to return to the caller + // instead of a generic context deadline exceeded error + attempt := 0 + for attempt = 0; (attempt < maxRetries) && shouldLoop; attempt++ { + netConn, err := p.cfg.Dialer(ctx) + if err != nil { + lastErr = err + // Add backoff delay for retry attempts + // (not for the first attempt, do at least one) + select { + case <-ctx.Done(): + shouldLoop = false + case <-time.After(backoffDuration): + // Continue with retry + } + continue } - return nil, err + + // Success - create connection + cn := NewConnWithBufferSize(netConn, p.cfg.ReadBufferSize, p.cfg.WriteBufferSize) + cn.pooled = pooled + if p.cfg.ConnMaxLifetime > 0 { + cn.expiresAt = time.Now().Add(p.cfg.ConnMaxLifetime) + } else { + cn.expiresAt = noExpiration + } + + return cn, nil } - cn := NewConn(netConn) - cn.pooled = pooled - return cn, nil + internal.Logger.Printf(ctx, "redis: connection pool: failed to dial after %d attempts: %v", attempt, lastErr) + // All retries failed - handle error tracking + 
p.setLastDialError(lastErr) + if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) { + go p.tryDial() + } + return nil, lastErr } func (p *ConnPool) tryDial() { @@ -230,15 +431,19 @@ func (p *ConnPool) tryDial() { return } - conn, err := p.cfg.Dialer(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + + conn, err := p.cfg.Dialer(ctx) if err != nil { p.setLastDialError(err) time.Sleep(time.Second) + cancel() continue } atomic.StoreUint32(&p.dialErrorsNum, 0) _ = conn.Close() + cancel() return } } @@ -257,6 +462,14 @@ func (p *ConnPool) getLastDialError() error { // Get returns existed connection from the pool or creates a new one. func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { + return p.getConn(ctx) +} + +// getConn returns a connection from the pool. +func (p *ConnPool) getConn(ctx context.Context) (*Conn, error) { + var cn *Conn + var err error + if p.closed() { return nil, ErrClosed } @@ -265,9 +478,16 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { return nil, err } - for { + // Use cached time for health checks (max 50ms staleness is acceptable) + nowNs := getCachedTimeNs() + + // Lock-free atomic read - no mutex overhead! + hookManager := p.hookManager.Load() + + for attempts := 0; attempts < getAttempts; attempts++ { + p.connsMu.Lock() - cn, err := p.popIdle() + cn, err = p.popIdle() p.connsMu.Unlock() if err != nil { @@ -279,127 +499,394 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { break } - if !p.isHealthyConn(cn) { + if !p.isHealthyConn(cn, nowNs) { _ = p.CloseConn(cn) continue } + // Process connection using the hooks system + // Combine error and rejection checks to reduce branches + if hookManager != nil { + acceptConn, err := hookManager.ProcessOnGet(ctx, cn, false) + if err != nil || !acceptConn { + if err != nil { + internal.Logger.Printf(ctx, "redis: connection pool: failed to process idle connection by hook: %v", err) + _ = p.CloseConn(cn) + } else { + internal.Logger.Printf(ctx, "redis: connection pool: conn[%d] rejected by hook, returning to pool", cn.GetID()) + // Return connection to pool without freeing the turn that this Get() call holds. + // We use putConnWithoutTurn() to run all the Put hooks and logic without freeing a turn. 
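+				// Invariant: the turn acquired by waitTurn() at the top of getConn
+				// stays held across these retries; it is released only when the
+				// returned connection is later put back or removed, or on the
+				// error paths below.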
+ p.putConnWithoutTurn(ctx, cn) + cn = nil + } + continue + } + } + atomic.AddUint32(&p.stats.Hits, 1) return cn, nil } atomic.AddUint32(&p.stats.Misses, 1) - newcn, err := p.newConn(ctx, true) + newcn, err := p.queuedNewConn(ctx) if err != nil { - p.freeTurn() return nil, err } + // Process connection using the hooks system + if hookManager != nil { + acceptConn, err := hookManager.ProcessOnGet(ctx, newcn, true) + // both errors and accept=false mean a hook rejected the connection + // this should not happen with a new connection, but we handle it gracefully + if err != nil || !acceptConn { + // Failed to process connection, discard it + internal.Logger.Printf(ctx, "redis: connection pool: failed to process new connection conn[%d] by hook: accept=%v, err=%v", newcn.GetID(), acceptConn, err) + _ = p.CloseConn(newcn) + return nil, err + } + } return newcn, nil } -func (p *ConnPool) waitTurn(ctx context.Context) error { +func (p *ConnPool) queuedNewConn(ctx context.Context) (*Conn, error) { select { + case p.dialsInProgress <- struct{}{}: + // Got permission, proceed to create connection case <-ctx.Done(): - return ctx.Err() - default: + p.freeTurn() + return nil, ctx.Err() } - select { - case p.queue <- struct{}{}: - return nil - default: + dialCtx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + + w := &wantConn{ + ctx: dialCtx, + cancelCtx: cancel, + result: make(chan wantConnResult, 1), } + var err error + defer func() { + if err != nil { + if cn := w.cancel(); cn != nil && p.putIdleConn(ctx, cn) { + p.freeTurn() + } + } + }() + + p.dialsQueue.enqueue(w) + + go func(w *wantConn) { + var freeTurnCalled bool + defer func() { + if err := recover(); err != nil { + if !freeTurnCalled { + p.freeTurn() + } + internal.Logger.Printf(context.Background(), "queuedNewConn panic: %+v", err) + } + }() + + defer w.cancelCtx() + defer func() { <-p.dialsInProgress }() // Release connection creation permission + + dialCtx := w.getCtxForDial() + cn, cnErr := p.newConn(dialCtx, true) + if cnErr != nil { + w.tryDeliver(nil, cnErr) // deliver error to caller, notify connection creation failed + p.freeTurn() + freeTurnCalled = true + return + } - timer := timers.Get().(*time.Timer) - timer.Reset(p.cfg.PoolTimeout) + delivered := w.tryDeliver(cn, cnErr) + if !delivered && p.putIdleConn(dialCtx, cn) { + p.freeTurn() + freeTurnCalled = true + } + }(w) select { case <-ctx.Done(): - if !timer.Stop() { - <-timer.C + err = ctx.Err() + return nil, err + case result := <-w.result: + err = result.err + return result.cn, err + } +} + +// putIdleConn puts a connection back to the pool or passes it to the next waiting request. +// +// It returns true if the connection was put back to the pool, +// which means the turn needs to be freed directly by the caller, +// or false if the connection was passed to the next waiting request, +// which means the turn will be freed by the waiting goroutine after it returns. 
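+//
+// Callers therefore pair it with freeTurn, as queuedNewConn does above:
+//
+//	if !w.tryDeliver(cn, nil) && p.putIdleConn(ctx, cn) {
+//		p.freeTurn() // no waiter took over the turn, release it here
+//	}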
+func (p *ConnPool) putIdleConn(ctx context.Context, cn *Conn) bool { + for { + w, ok := p.dialsQueue.dequeue() + if !ok { + break } - timers.Put(timer) - return ctx.Err() - case p.queue <- struct{}{}: - if !timer.Stop() { - <-timer.C + if w.tryDeliver(cn, nil) { + return false } - timers.Put(timer) + } + + p.connsMu.Lock() + defer p.connsMu.Unlock() + + if p.closed() { + _ = cn.Close() + return true + } + + // poolSize is increased in newConn + p.idleConns = append(p.idleConns, cn) + p.idleConnsLen.Add(1) + + return true +} + +func (p *ConnPool) waitTurn(ctx context.Context) error { + // Fast path: check context first + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Fast path: try to acquire without blocking + if p.semaphore.TryAcquire() { return nil - case <-timer.C: - timers.Put(timer) + } + + // Slow path: need to wait + start := time.Now() + err := p.semaphore.Acquire(ctx, p.cfg.PoolTimeout, ErrPoolTimeout) + + switch err { + case nil: + // Successfully acquired after waiting + p.waitDurationNs.Add(time.Now().UnixNano() - start.UnixNano()) + atomic.AddUint32(&p.stats.WaitCount, 1) + case ErrPoolTimeout: atomic.AddUint32(&p.stats.Timeouts, 1) - return ErrPoolTimeout } + + return err } func (p *ConnPool) freeTurn() { - <-p.queue + p.semaphore.Release() } func (p *ConnPool) popIdle() (*Conn, error) { if p.closed() { return nil, ErrClosed } + defer p.checkMinIdleConns() + n := len(p.idleConns) if n == 0 { return nil, nil } var cn *Conn - if p.cfg.PoolFIFO { - cn = p.idleConns[0] - copy(p.idleConns, p.idleConns[1:]) - p.idleConns = p.idleConns[:n-1] - } else { - idx := n - 1 - cn = p.idleConns[idx] - p.idleConns = p.idleConns[:idx] + attempts := 0 + + maxAttempts := util.Min(popAttempts, n) + for attempts < maxAttempts { + if len(p.idleConns) == 0 { + return nil, nil + } + + if p.cfg.PoolFIFO { + cn = p.idleConns[0] + copy(p.idleConns, p.idleConns[1:]) + p.idleConns = p.idleConns[:len(p.idleConns)-1] + } else { + idx := len(p.idleConns) - 1 + cn = p.idleConns[idx] + p.idleConns = p.idleConns[:idx] + } + attempts++ + + // Hot path optimization: try IDLE → IN_USE or CREATED → IN_USE transition + // Using inline TryAcquire() method for better performance (avoids pointer dereference) + if cn.TryAcquire() { + // Successfully acquired the connection + p.idleConnsLen.Add(-1) + break + } + + // Connection is in UNUSABLE, INITIALIZING, or other state - skip it + + // Connection is not in a valid state (might be UNUSABLE for handoff/re-auth, INITIALIZING, etc.) + // Put it back in the pool and try the next one + if p.cfg.PoolFIFO { + // FIFO: put at end (will be picked up last since we pop from front) + p.idleConns = append(p.idleConns, cn) + } else { + // LIFO: put at beginning (will be picked up last since we pop from end) + p.idleConns = append([]*Conn{cn}, p.idleConns...) 
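+			// Note: prepending allocates and copies the slice; this is acceptable
+			// because unusable connections are rare and popAttempts bounds the loop.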
+ } + cn = nil } - p.idleConnsLen-- - p.checkMinIdleConns() + + // If we exhausted all attempts without finding a usable connection, return nil + if attempts > 1 && attempts >= maxAttempts && int32(attempts) >= p.poolSize.Load() { + internal.Logger.Printf(context.Background(), "redis: connection pool: failed to get a usable connection after %d attempts", attempts) + return nil, nil + } + return cn, nil } func (p *ConnPool) Put(ctx context.Context, cn *Conn) { - if cn.rd.Buffered() > 0 { - internal.Logger.Printf(ctx, "Conn has unread data") - p.Remove(ctx, cn, BadConnError{}) + p.putConn(ctx, cn, true) +} + +// putConnWithoutTurn is an internal method that puts a connection back to the pool +// without freeing a turn. This is used when returning a rejected connection from +// within Get(), where the turn is still held by the Get() call. +func (p *ConnPool) putConnWithoutTurn(ctx context.Context, cn *Conn) { + p.putConn(ctx, cn, false) +} + +// putConn is the internal implementation of Put that optionally frees a turn. +func (p *ConnPool) putConn(ctx context.Context, cn *Conn, freeTurn bool) { + // Process connection using the hooks system + shouldPool := true + shouldRemove := false + var err error + + if cn.HasBufferedData() { + // Peek at the reply type to check if it's a push notification + if replyType, err := cn.PeekReplyTypeSafe(); err != nil || replyType != proto.RespPush { + // Not a push notification or error peeking, remove connection + internal.Logger.Printf(ctx, "Conn has unread data (not push notification), removing it") + p.removeConnInternal(ctx, cn, err, freeTurn) + return + } + // It's a push notification, allow pooling (client will handle it) + } + + // Lock-free atomic read - no mutex overhead! + hookManager := p.hookManager.Load() + + if hookManager != nil { + shouldPool, shouldRemove, err = hookManager.ProcessOnPut(ctx, cn) + if err != nil { + internal.Logger.Printf(ctx, "Connection hook error: %v", err) + p.removeConnInternal(ctx, cn, err, freeTurn) + return + } + } + + // Combine all removal checks into one - reduces branches + if shouldRemove || !shouldPool { + p.removeConnInternal(ctx, cn, errHookRequestedRemoval, freeTurn) return } if !cn.pooled { - p.Remove(ctx, cn, nil) + p.removeConnInternal(ctx, cn, errConnNotPooled, freeTurn) return } var shouldCloseConn bool - p.connsMu.Lock() + if p.cfg.MaxIdleConns == 0 || p.idleConnsLen.Load() < p.cfg.MaxIdleConns { + // Hot path optimization: try fast IN_USE → IDLE transition + // Using inline Release() method for better performance (avoids pointer dereference) + transitionedToIdle := cn.Release() + + // Handle unexpected state changes + if !transitionedToIdle { + // Fast path failed - hook might have changed state (e.g., to UNUSABLE for handoff) + // Keep the state set by the hook and pool the connection anyway + currentState := cn.GetStateMachine().GetState() + switch currentState { + case StateUnusable: + // expected state, don't log it + case StateClosed: + internal.Logger.Printf(ctx, "Unexpected conn[%d] state changed by hook to %v, closing it", cn.GetID(), currentState) + shouldCloseConn = true + p.removeConnWithLock(cn) + default: + // Pool as-is + internal.Logger.Printf(ctx, "Unexpected conn[%d] state changed by hook to %v, pooling as-is", cn.GetID(), currentState) + } + } - if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns { - p.idleConns = append(p.idleConns, cn) - p.idleConnsLen++ + // unusable conns are expected to become usable at some point (background process is reconnecting them) + // put 
them at the opposite end of the queue + // Optimization: if we just transitioned to IDLE, we know it's usable - skip the check + if !transitionedToIdle && !cn.IsUsable() { + if p.cfg.PoolFIFO { + p.connsMu.Lock() + p.idleConns = append(p.idleConns, cn) + p.connsMu.Unlock() + } else { + p.connsMu.Lock() + p.idleConns = append([]*Conn{cn}, p.idleConns...) + p.connsMu.Unlock() + } + p.idleConnsLen.Add(1) + } else if !shouldCloseConn { + p.connsMu.Lock() + p.idleConns = append(p.idleConns, cn) + p.connsMu.Unlock() + p.idleConnsLen.Add(1) + } } else { - p.removeConn(cn) shouldCloseConn = true + p.removeConnWithLock(cn) } - p.connsMu.Unlock() - - p.freeTurn() + if freeTurn { + p.freeTurn() + } if shouldCloseConn { _ = p.closeConn(cn) } + + cn.SetLastPutAtNs(getCachedTimeNs()) +} + +func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) { + p.removeConnInternal(ctx, cn, reason, true) +} + +// RemoveWithoutTurn removes a connection from the pool without freeing a turn. +// This should be used when removing a connection from a context that didn't acquire +// a turn via Get() (e.g., background workers, cleanup tasks). +// For normal removal after Get(), use Remove() instead. +func (p *ConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.removeConnInternal(ctx, cn, reason, false) } -func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) { +// removeConnInternal is the internal implementation of Remove that optionally frees a turn. +func (p *ConnPool) removeConnInternal(ctx context.Context, cn *Conn, reason error, freeTurn bool) { + // Lock-free atomic read - no mutex overhead! + hookManager := p.hookManager.Load() + + if hookManager != nil { + hookManager.ProcessOnRemove(ctx, cn, reason) + } + p.removeConnWithLock(cn) - p.freeTurn() + + if freeTurn { + p.freeTurn() + } + _ = p.closeConn(cn) + + // Check if we need to create new idle connections to maintain MinIdleConns + p.checkMinIdleConns() } func (p *ConnPool) CloseConn(cn *Conn) error { @@ -414,17 +901,22 @@ func (p *ConnPool) removeConnWithLock(cn *Conn) { } func (p *ConnPool) removeConn(cn *Conn) { - for i, c := range p.conns { - if c == cn { - p.conns = append(p.conns[:i], p.conns[i+1:]...) - if cn.pooled { - p.poolSize-- - p.checkMinIdleConns() + cid := cn.GetID() + delete(p.conns, cid) + atomic.AddUint32(&p.stats.StaleConns, 1) + + // Decrement pool size counter when removing a connection + if cn.pooled { + p.poolSize.Add(-1) + // this can be idle conn + for idx, ic := range p.idleConns { + if ic == cn { + p.idleConns = append(p.idleConns[:idx], p.idleConns[idx+1:]...) + p.idleConnsLen.Add(-1) + break } - break } } - atomic.AddUint32(&p.stats.StaleConns, 1) } func (p *ConnPool) closeConn(cn *Conn) error { @@ -442,16 +934,27 @@ func (p *ConnPool) Len() int { // IdleLen returns number of idle connections. func (p *ConnPool) IdleLen() int { p.connsMu.Lock() - n := p.idleConnsLen + n := p.idleConnsLen.Load() p.connsMu.Unlock() - return n + return int(n) +} + +// Size returns the maximum pool size (capacity). +// +// This is used by the streaming credentials manager to size the re-auth worker pool, +// ensuring that re-auth operations don't exhaust the connection pool. 
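+//
+// Sketch of such a consumer (names here are hypothetical):
+//
+//	reauthWorkers := make(chan struct{}, pooler.Size()) // at most pool-capacity concurrent re-auths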
+func (p *ConnPool) Size() int { + return int(p.cfg.PoolSize) } func (p *ConnPool) Stats() *Stats { return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + Hits: atomic.LoadUint32(&p.stats.Hits), + Misses: atomic.LoadUint32(&p.stats.Misses), + Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + WaitCount: atomic.LoadUint32(&p.stats.WaitCount), + Unusable: atomic.LoadUint32(&p.stats.Unusable), + WaitDurationNs: p.waitDurationNs.Load(), TotalConns: uint32(p.Len()), IdleConns: uint32(p.IdleLen()), @@ -491,34 +994,62 @@ func (p *ConnPool) Close() error { } } p.conns = nil - p.poolSize = 0 + p.poolSize.Store(0) p.idleConns = nil - p.idleConnsLen = 0 + p.idleConnsLen.Store(0) p.connsMu.Unlock() return firstErr } -var zeroTime = time.Time{} - -func (p *ConnPool) isHealthyConn(cn *Conn) bool { - now := time.Now() +func (p *ConnPool) isHealthyConn(cn *Conn, nowNs int64) bool { + // Performance optimization: check conditions from cheapest to most expensive, + // and from most likely to fail to least likely to fail. - if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime { - return false + // Only fails if ConnMaxLifetime is set AND connection is old. + // Most pools don't set ConnMaxLifetime, so this rarely fails. + if p.cfg.ConnMaxLifetime > 0 { + if cn.expiresAt.UnixNano() < nowNs { + return false // Connection has exceeded max lifetime + } } - if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime { - return false + + // Most pools set ConnMaxIdleTime, and idle connections are common. + // Checking this first allows us to fail fast without expensive syscalls. + if p.cfg.ConnMaxIdleTime > 0 { + if nowNs-cn.UsedAtNs() >= int64(p.cfg.ConnMaxIdleTime) { + return false // Connection has been idle too long + } } - if cn.sysConn != nil { - // reset previous timeout. - _ = cn.netConn.SetDeadline(zeroTime) - if connCheck(cn.sysConn) != nil { + // Only run this if the cheap checks passed. 
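+	// connCheck does a non-blocking read on the raw socket to detect peers
+	// that closed the connection (or unexpected buffered data); it costs a
+	// syscall, which is why it runs after the cheap time-based checks.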
+ if err := connCheck(cn.getNetConn()); err != nil { + // If there's unexpected data, it might be push notifications (RESP3) + if p.cfg.PushNotificationsEnabled && err == errUnexpectedRead { + // Peek at the reply type to check if it's a push notification + if replyType, err := cn.rd.PeekReplyType(); err == nil && replyType == proto.RespPush { + // For RESP3 connections with push notifications, we allow some buffered data + // The client will process these notifications before using the connection + internal.Logger.Printf( + context.Background(), + "push: conn[%d] has buffered data, likely push notifications - will be processed by client", + cn.GetID(), + ) + + // Update timestamp for healthy connection + cn.SetUsedAtNs(nowNs) + + // Connection is healthy, client will handle notifications + return true + } + // Not a push notification - treat as unhealthy return false } + // Connection failed health check + return false } - cn.SetUsedAt(now) + // Only update UsedAt if connection is healthy (avoids unnecessary atomic store) + cn.SetUsedAtNs(nowNs) return true } diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go index 5a3fde191..365219a57 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go @@ -1,7 +1,13 @@ package pool -import "context" +import ( + "context" + "time" +) +// SingleConnPool is a pool that always returns the same connection. +// Note: This pool is not thread-safe. +// It is intended to be used by clients that need a single connection. type SingleConnPool struct { pool Pooler cn *Conn @@ -10,6 +16,12 @@ type SingleConnPool struct { var _ Pooler = (*SingleConnPool)(nil) +// NewSingleConnPool creates a new single connection pool. +// The pool will always return the same connection. +// The pool will not: +// - Close the connection +// - Reconnect the connection +// - Track the connection in any way func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool { return &SingleConnPool{ pool: pool, @@ -25,20 +37,47 @@ func (p *SingleConnPool) CloseConn(cn *Conn) error { return p.pool.CloseConn(cn) } -func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) { +func (p *SingleConnPool) Get(_ context.Context) (*Conn, error) { if p.stickyErr != nil { return nil, p.stickyErr } + if p.cn == nil { + return nil, ErrClosed + } + + // NOTE: SingleConnPool is NOT thread-safe by design and is used in special scenarios: + // - During initialization (connection is in INITIALIZING state) + // - During re-authentication (connection is in UNUSABLE state) + // - For transactions (connection might be in various states) + // We use SetUsed() which forces the transition, rather than TryTransition() which + // would fail if the connection is not in IDLE/CREATED state. + p.cn.SetUsed(true) + p.cn.SetUsedAt(time.Now()) return p.cn, nil } -func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {} +func (p *SingleConnPool) Put(_ context.Context, cn *Conn) { + if p.cn == nil { + return + } + if p.cn != cn { + return + } + p.cn.SetUsed(false) +} -func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) { +func (p *SingleConnPool) Remove(_ context.Context, cn *Conn, reason error) { + cn.SetUsed(false) p.cn = nil p.stickyErr = reason } +// RemoveWithoutTurn has the same behavior as Remove for SingleConnPool +// since SingleConnPool doesn't use a turn-based queue system. 
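+//
+// Usage sketch: wrap one connection so code written against Pooler always
+// sees the same conn:
+//
+//	scp := NewSingleConnPool(basePool, cn)
+//	c, _ := scp.Get(ctx) // always returns cn, marked as used
+//	scp.Put(ctx, c)      // clears the used flag; cn never re-enters basePool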
+func (p *SingleConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.Remove(ctx, cn, reason) +} + func (p *SingleConnPool) Close() error { p.cn = nil p.stickyErr = ErrClosed @@ -53,6 +92,13 @@ func (p *SingleConnPool) IdleLen() int { return 0 } +// Size returns the maximum pool size, which is always 1 for SingleConnPool. +func (p *SingleConnPool) Size() int { return 1 } + func (p *SingleConnPool) Stats() *Stats { return &Stats{} } + +func (p *SingleConnPool) AddPoolHook(_ PoolHook) {} + +func (p *SingleConnPool) RemovePoolHook(_ PoolHook) {} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go index 3adb99bc8..be869b569 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go @@ -123,6 +123,12 @@ func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) { p.ch <- cn } +// RemoveWithoutTurn has the same behavior as Remove for StickyConnPool +// since StickyConnPool doesn't use a turn-based queue system. +func (p *StickyConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.Remove(ctx, cn, reason) +} + func (p *StickyConnPool) Close() error { if shared := atomic.AddInt32(&p.shared, -1); shared > 0 { return nil @@ -196,6 +202,13 @@ func (p *StickyConnPool) IdleLen() int { return len(p.ch) } +// Size returns the maximum pool size, which is always 1 for StickyConnPool. +func (p *StickyConnPool) Size() int { return 1 } + func (p *StickyConnPool) Stats() *Stats { return &Stats{} } + +func (p *StickyConnPool) AddPoolHook(hook PoolHook) {} + +func (p *StickyConnPool) RemovePoolHook(hook PoolHook) {} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go new file mode 100644 index 000000000..5b29659ea --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go @@ -0,0 +1,80 @@ +package pool + +import ( + "context" + "net" + "sync" + "sync/atomic" +) + +type PubSubStats struct { + Created uint32 + Untracked uint32 + Active uint32 +} + +// PubSubPool manages a pool of PubSub connections. +type PubSubPool struct { + opt *Options + netDialer func(ctx context.Context, network, addr string) (net.Conn, error) + + // Map to track active PubSub connections + activeConns sync.Map // map[uint64]*Conn (connID -> conn) + closed atomic.Bool + stats PubSubStats +} + +// NewPubSubPool implements a pool for PubSub connections. 
+// It intentionally does not implement the Pooler interface +func NewPubSubPool(opt *Options, netDialer func(ctx context.Context, network, addr string) (net.Conn, error)) *PubSubPool { + return &PubSubPool{ + opt: opt, + netDialer: netDialer, + } +} + +func (p *PubSubPool) NewConn(ctx context.Context, network string, addr string, channels []string) (*Conn, error) { + if p.closed.Load() { + return nil, ErrClosed + } + + netConn, err := p.netDialer(ctx, network, addr) + if err != nil { + return nil, err + } + cn := NewConnWithBufferSize(netConn, p.opt.ReadBufferSize, p.opt.WriteBufferSize) + cn.pubsub = true + atomic.AddUint32(&p.stats.Created, 1) + return cn, nil + +} + +func (p *PubSubPool) TrackConn(cn *Conn) { + atomic.AddUint32(&p.stats.Active, 1) + p.activeConns.Store(cn.GetID(), cn) +} + +func (p *PubSubPool) UntrackConn(cn *Conn) { + atomic.AddUint32(&p.stats.Active, ^uint32(0)) + atomic.AddUint32(&p.stats.Untracked, 1) + p.activeConns.Delete(cn.GetID()) +} + +func (p *PubSubPool) Close() error { + p.closed.Store(true) + p.activeConns.Range(func(key, value interface{}) bool { + cn := value.(*Conn) + _ = cn.Close() + return true + }) + return nil +} + +func (p *PubSubPool) Stats() *PubSubStats { + // load stats atomically + return &PubSubStats{ + Created: atomic.LoadUint32(&p.stats.Created), + Untracked: atomic.LoadUint32(&p.stats.Untracked), + Active: atomic.LoadUint32(&p.stats.Active), + } +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go new file mode 100644 index 000000000..6f9e4bfa9 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go @@ -0,0 +1,93 @@ +package pool + +import ( + "context" + "sync" +) + +type wantConn struct { + mu sync.Mutex // protects ctx, done and sending of the result + ctx context.Context // context for dial, cleared after delivered or canceled + cancelCtx context.CancelFunc + done bool // true after delivered or canceled + result chan wantConnResult // channel to deliver connection or error +} + +// getCtxForDial returns context for dial or nil if connection was delivered or canceled. 
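+//
+// Lifecycle sketch (this mirrors queuedNewConn in pool.go):
+//
+//	w := &wantConn{ctx: dialCtx, cancelCtx: cancel, result: make(chan wantConnResult, 1)}
+//	queue.enqueue(w)                   // waiter side: join the dial queue
+//	delivered := w.tryDeliver(cn, nil) // dialer side: hand the conn to the waiter
+//	if cn := w.cancel(); cn != nil {   // waiter gave up: reclaim an already-delivered conn
+//		pool.putIdleConn(ctx, cn)
+//	}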
+func (w *wantConn) getCtxForDial() context.Context { + w.mu.Lock() + defer w.mu.Unlock() + + return w.ctx +} + +func (w *wantConn) tryDeliver(cn *Conn, err error) bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.done { + return false + } + + w.done = true + w.ctx = nil + + w.result <- wantConnResult{cn: cn, err: err} + close(w.result) + + return true +} + +func (w *wantConn) cancel() *Conn { + w.mu.Lock() + var cn *Conn + if w.done { + select { + case result := <-w.result: + cn = result.cn + default: + } + } else { + close(w.result) + } + + w.done = true + w.ctx = nil + w.mu.Unlock() + + return cn +} + +type wantConnResult struct { + cn *Conn + err error +} + +type wantConnQueue struct { + mu sync.RWMutex + items []*wantConn +} + +func newWantConnQueue() *wantConnQueue { + return &wantConnQueue{ + items: make([]*wantConn, 0), + } +} + +func (q *wantConnQueue) enqueue(w *wantConn) { + q.mu.Lock() + defer q.mu.Unlock() + q.items = append(q.items, w) +} + +func (q *wantConnQueue) dequeue() (*wantConn, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if len(q.items) == 0 { + return nil, false + } + + item := q.items[0] + q.items = q.items[1:] + return item, true +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go index 8d23817fe..bac68f796 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go @@ -12,6 +12,9 @@ import ( "github.com/redis/go-redis/v9/internal/util" ) +// DefaultBufferSize is the default size for read/write buffers (32 KiB). +const DefaultBufferSize = 32 * 1024 + // redis resp protocol data type. const ( RespStatus = '+' // +\r\n @@ -47,7 +50,8 @@ func (e RedisError) Error() string { return string(e) } func (RedisError) RedisError() {} func ParseErrorReply(line []byte) error { - return RedisError(line[1:]) + msg := string(line[1:]) + return parseTypedRedisError(msg) } //------------------------------------------------------------------------------ @@ -58,7 +62,13 @@ type Reader struct { func NewReader(rd io.Reader) *Reader { return &Reader{ - rd: bufio.NewReader(rd), + rd: bufio.NewReaderSize(rd, DefaultBufferSize), + } +} + +func NewReaderSize(rd io.Reader, size int) *Reader { + return &Reader{ + rd: bufio.NewReaderSize(rd, size), } } @@ -90,6 +100,92 @@ func (r *Reader) PeekReplyType() (byte, error) { return b[0], nil } +func (r *Reader) PeekPushNotificationName() (string, error) { + // "prime" the buffer by peeking at the next byte + c, err := r.Peek(1) + if err != nil { + return "", err + } + if c[0] != RespPush { + return "", fmt.Errorf("redis: can't peek push notification name, next reply is not a push notification") + } + + // peek 36 bytes at most, should be enough to read the push notification name + toPeek := 36 + buffered := r.Buffered() + if buffered == 0 { + return "", fmt.Errorf("redis: can't peek push notification name, no data available") + } + if buffered < toPeek { + toPeek = buffered + } + buf, err := r.rd.Peek(toPeek) + if err != nil { + return "", err + } + if buf[0] != RespPush { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + + if len(buf) < 3 { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + + // remove push notification type + buf = buf[1:] + // remove first line - e.g. 
>2\r\n + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[i+2:] + break + } else { + if buf[i] < '0' || buf[i] > '9' { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + } + } + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + // next line should be $\r\n or +\r\n + // should have the type of the push notification name and it's length + if buf[0] != RespString && buf[0] != RespStatus { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + typeOfName := buf[0] + // remove the type of the push notification name + buf = buf[1:] + if typeOfName == RespString { + // remove the length of the string + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[i+2:] + break + } else { + if buf[i] < '0' || buf[i] > '9' { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + } + } + } + + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + // keep only the notification name + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[:i] + break + } + } + + return util.BytesToString(buf), nil +} + // ReadLine Return a valid reply, it will check the protocol or redis error, // and discard the attribute type. func (r *Reader) ReadLine() ([]byte, error) { @@ -106,7 +202,7 @@ func (r *Reader) ReadLine() ([]byte, error) { var blobErr string blobErr, err = r.readStringReply(line) if err == nil { - err = RedisError(blobErr) + err = parseTypedRedisError(blobErr) } return nil, err case RespAttr: diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go b/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go new file mode 100644 index 000000000..f553e2f96 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go @@ -0,0 +1,488 @@ +package proto + +import ( + "errors" + "strings" +) + +// Typed Redis errors for better error handling with wrapping support. +// These errors maintain backward compatibility by keeping the same error messages. + +// LoadingError is returned when Redis is loading the dataset in memory. +type LoadingError struct { + msg string +} + +func (e *LoadingError) Error() string { + return e.msg +} + +func (e *LoadingError) RedisError() {} + +// NewLoadingError creates a new LoadingError with the given message. +func NewLoadingError(msg string) *LoadingError { + return &LoadingError{msg: msg} +} + +// ReadOnlyError is returned when trying to write to a read-only replica. +type ReadOnlyError struct { + msg string +} + +func (e *ReadOnlyError) Error() string { + return e.msg +} + +func (e *ReadOnlyError) RedisError() {} + +// NewReadOnlyError creates a new ReadOnlyError with the given message. +func NewReadOnlyError(msg string) *ReadOnlyError { + return &ReadOnlyError{msg: msg} +} + +// MovedError is returned when a key has been moved to a different node in a cluster. +type MovedError struct { + msg string + addr string +} + +func (e *MovedError) Error() string { + return e.msg +} + +func (e *MovedError) RedisError() {} + +// Addr returns the address of the node where the key has been moved. +func (e *MovedError) Addr() string { + return e.addr +} + +// NewMovedError creates a new MovedError with the given message and address. 
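+//
+// Cluster-aware callers typically redirect using the embedded address
+// (sketch; the node lookup is hypothetical):
+//
+//	if moved, ok := IsMovedError(err); ok {
+//		node := nodeByAddr(moved.Addr())
+//		// re-issue the command against node
+//	}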
+func NewMovedError(msg string, addr string) *MovedError { + return &MovedError{msg: msg, addr: addr} +} + +// AskError is returned when a key is being migrated and the client should ask another node. +type AskError struct { + msg string + addr string +} + +func (e *AskError) Error() string { + return e.msg +} + +func (e *AskError) RedisError() {} + +// Addr returns the address of the node to ask. +func (e *AskError) Addr() string { + return e.addr +} + +// NewAskError creates a new AskError with the given message and address. +func NewAskError(msg string, addr string) *AskError { + return &AskError{msg: msg, addr: addr} +} + +// ClusterDownError is returned when the cluster is down. +type ClusterDownError struct { + msg string +} + +func (e *ClusterDownError) Error() string { + return e.msg +} + +func (e *ClusterDownError) RedisError() {} + +// NewClusterDownError creates a new ClusterDownError with the given message. +func NewClusterDownError(msg string) *ClusterDownError { + return &ClusterDownError{msg: msg} +} + +// TryAgainError is returned when a command cannot be processed and should be retried. +type TryAgainError struct { + msg string +} + +func (e *TryAgainError) Error() string { + return e.msg +} + +func (e *TryAgainError) RedisError() {} + +// NewTryAgainError creates a new TryAgainError with the given message. +func NewTryAgainError(msg string) *TryAgainError { + return &TryAgainError{msg: msg} +} + +// MasterDownError is returned when the master is down. +type MasterDownError struct { + msg string +} + +func (e *MasterDownError) Error() string { + return e.msg +} + +func (e *MasterDownError) RedisError() {} + +// NewMasterDownError creates a new MasterDownError with the given message. +func NewMasterDownError(msg string) *MasterDownError { + return &MasterDownError{msg: msg} +} + +// MaxClientsError is returned when the maximum number of clients has been reached. +type MaxClientsError struct { + msg string +} + +func (e *MaxClientsError) Error() string { + return e.msg +} + +func (e *MaxClientsError) RedisError() {} + +// NewMaxClientsError creates a new MaxClientsError with the given message. +func NewMaxClientsError(msg string) *MaxClientsError { + return &MaxClientsError{msg: msg} +} + +// AuthError is returned when authentication fails. +type AuthError struct { + msg string +} + +func (e *AuthError) Error() string { + return e.msg +} + +func (e *AuthError) RedisError() {} + +// NewAuthError creates a new AuthError with the given message. +func NewAuthError(msg string) *AuthError { + return &AuthError{msg: msg} +} + +// PermissionError is returned when a user lacks required permissions. +type PermissionError struct { + msg string +} + +func (e *PermissionError) Error() string { + return e.msg +} + +func (e *PermissionError) RedisError() {} + +// NewPermissionError creates a new PermissionError with the given message. +func NewPermissionError(msg string) *PermissionError { + return &PermissionError{msg: msg} +} + +// ExecAbortError is returned when a transaction is aborted. +type ExecAbortError struct { + msg string +} + +func (e *ExecAbortError) Error() string { + return e.msg +} + +func (e *ExecAbortError) RedisError() {} + +// NewExecAbortError creates a new ExecAbortError with the given message. +func NewExecAbortError(msg string) *ExecAbortError { + return &ExecAbortError{msg: msg} +} + +// OOMError is returned when Redis is out of memory. 
+type OOMError struct { + msg string +} + +func (e *OOMError) Error() string { + return e.msg +} + +func (e *OOMError) RedisError() {} + +// NewOOMError creates a new OOMError with the given message. +func NewOOMError(msg string) *OOMError { + return &OOMError{msg: msg} +} + +// parseTypedRedisError parses a Redis error message and returns a typed error if applicable. +// This function maintains backward compatibility by keeping the same error messages. +func parseTypedRedisError(msg string) error { + // Check for specific error patterns and return typed errors + switch { + case strings.HasPrefix(msg, "LOADING "): + return NewLoadingError(msg) + case strings.HasPrefix(msg, "READONLY "): + return NewReadOnlyError(msg) + case strings.HasPrefix(msg, "MOVED "): + // Extract address from "MOVED " + addr := extractAddr(msg) + return NewMovedError(msg, addr) + case strings.HasPrefix(msg, "ASK "): + // Extract address from "ASK " + addr := extractAddr(msg) + return NewAskError(msg, addr) + case strings.HasPrefix(msg, "CLUSTERDOWN "): + return NewClusterDownError(msg) + case strings.HasPrefix(msg, "TRYAGAIN "): + return NewTryAgainError(msg) + case strings.HasPrefix(msg, "MASTERDOWN "): + return NewMasterDownError(msg) + case msg == "ERR max number of clients reached": + return NewMaxClientsError(msg) + case strings.HasPrefix(msg, "NOAUTH "), strings.HasPrefix(msg, "WRONGPASS "), strings.Contains(msg, "unauthenticated"): + return NewAuthError(msg) + case strings.HasPrefix(msg, "NOPERM "): + return NewPermissionError(msg) + case strings.HasPrefix(msg, "EXECABORT "): + return NewExecAbortError(msg) + case strings.HasPrefix(msg, "OOM "): + return NewOOMError(msg) + default: + // Return generic RedisError for unknown error types + return RedisError(msg) + } +} + +// extractAddr extracts the address from MOVED/ASK error messages. +// Format: "MOVED " or "ASK " +func extractAddr(msg string) string { + ind := strings.LastIndex(msg, " ") + if ind == -1 { + return "" + } + return msg[ind+1:] +} + +// IsLoadingError checks if an error is a LoadingError, even if wrapped. +func IsLoadingError(err error) bool { + if err == nil { + return false + } + var loadingErr *LoadingError + if errors.As(err, &loadingErr) { + return true + } + // Check if wrapped error is a RedisError with LOADING prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "LOADING ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "LOADING ") +} + +// IsReadOnlyError checks if an error is a ReadOnlyError, even if wrapped. +func IsReadOnlyError(err error) bool { + if err == nil { + return false + } + var readOnlyErr *ReadOnlyError + if errors.As(err, &readOnlyErr) { + return true + } + // Check if wrapped error is a RedisError with READONLY prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "READONLY ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "READONLY ") +} + +// IsMovedError checks if an error is a MovedError, even if wrapped. +// Returns the error and a boolean indicating if it's a MovedError. 
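+//
+// For example, parsing a raw redirect reply yields a typed error that this
+// helper recognizes:
+//
+//	err := ParseErrorReply([]byte("-MOVED 3999 127.0.0.1:6381"))
+//	moved, ok := IsMovedError(err) // ok == true, moved.Addr() == "127.0.0.1:6381"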
+func IsMovedError(err error) (*MovedError, bool) { + if err == nil { + return nil, false + } + var movedErr *MovedError + if errors.As(err, &movedErr) { + return movedErr, true + } + // Fallback to string checking for backward compatibility + s := err.Error() + if strings.HasPrefix(s, "MOVED ") { + // Parse: MOVED 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + return &MovedError{msg: s, addr: parts[2]}, true + } + } + return nil, false +} + +// IsAskError checks if an error is an AskError, even if wrapped. +// Returns the error and a boolean indicating if it's an AskError. +func IsAskError(err error) (*AskError, bool) { + if err == nil { + return nil, false + } + var askErr *AskError + if errors.As(err, &askErr) { + return askErr, true + } + // Fallback to string checking for backward compatibility + s := err.Error() + if strings.HasPrefix(s, "ASK ") { + // Parse: ASK 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + return &AskError{msg: s, addr: parts[2]}, true + } + } + return nil, false +} + +// IsClusterDownError checks if an error is a ClusterDownError, even if wrapped. +func IsClusterDownError(err error) bool { + if err == nil { + return false + } + var clusterDownErr *ClusterDownError + if errors.As(err, &clusterDownErr) { + return true + } + // Check if wrapped error is a RedisError with CLUSTERDOWN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "CLUSTERDOWN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "CLUSTERDOWN ") +} + +// IsTryAgainError checks if an error is a TryAgainError, even if wrapped. +func IsTryAgainError(err error) bool { + if err == nil { + return false + } + var tryAgainErr *TryAgainError + if errors.As(err, &tryAgainErr) { + return true + } + // Check if wrapped error is a RedisError with TRYAGAIN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "TRYAGAIN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "TRYAGAIN ") +} + +// IsMasterDownError checks if an error is a MasterDownError, even if wrapped. +func IsMasterDownError(err error) bool { + if err == nil { + return false + } + var masterDownErr *MasterDownError + if errors.As(err, &masterDownErr) { + return true + } + // Check if wrapped error is a RedisError with MASTERDOWN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "MASTERDOWN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "MASTERDOWN ") +} + +// IsMaxClientsError checks if an error is a MaxClientsError, even if wrapped. +func IsMaxClientsError(err error) bool { + if err == nil { + return false + } + var maxClientsErr *MaxClientsError + if errors.As(err, &maxClientsErr) { + return true + } + // Check if wrapped error is a RedisError with max clients prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "ERR max number of clients reached") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "ERR max number of clients reached") +} + +// IsAuthError checks if an error is an AuthError, even if wrapped. 
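+// Because the check uses errors.As, wrapped errors are matched too, e.g.
+// (illustrative):
+//
+//	wrapped := fmt.Errorf("handshake failed: %w", NewAuthError("NOAUTH Authentication required."))
+//	IsAuthError(wrapped) // true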
+func IsAuthError(err error) bool { + if err == nil { + return false + } + var authErr *AuthError + if errors.As(err, &authErr) { + return true + } + // Check if wrapped error is a RedisError with auth error prefix + var redisErr RedisError + if errors.As(err, &redisErr) { + s := redisErr.Error() + return strings.HasPrefix(s, "NOAUTH ") || strings.HasPrefix(s, "WRONGPASS ") || strings.Contains(s, "unauthenticated") + } + // Fallback to string checking for backward compatibility + s := err.Error() + return strings.HasPrefix(s, "NOAUTH ") || strings.HasPrefix(s, "WRONGPASS ") || strings.Contains(s, "unauthenticated") +} + +// IsPermissionError checks if an error is a PermissionError, even if wrapped. +func IsPermissionError(err error) bool { + if err == nil { + return false + } + var permErr *PermissionError + if errors.As(err, &permErr) { + return true + } + // Check if wrapped error is a RedisError with NOPERM prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "NOPERM ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "NOPERM ") +} + +// IsExecAbortError checks if an error is an ExecAbortError, even if wrapped. +func IsExecAbortError(err error) bool { + if err == nil { + return false + } + var execAbortErr *ExecAbortError + if errors.As(err, &execAbortErr) { + return true + } + // Check if wrapped error is a RedisError with EXECABORT prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "EXECABORT ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "EXECABORT ") +} + +// IsOOMError checks if an error is an OOMError, even if wrapped. 
+func IsOOMError(err error) bool { + if err == nil { + return false + } + var oomErr *OOMError + if errors.As(err, &oomErr) { + return true + } + // Check if wrapped error is a RedisError with OOM prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "OOM ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "OOM ") +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go index 78595cc4f..38e66c688 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go @@ -66,56 +66,95 @@ func (w *Writer) WriteArg(v interface{}) error { case string: return w.string(v) case *string: + if v == nil { + return w.string("") + } return w.string(*v) case []byte: return w.bytes(v) case int: return w.int(int64(v)) case *int: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int8: return w.int(int64(v)) case *int8: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int16: return w.int(int64(v)) case *int16: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int32: return w.int(int64(v)) case *int32: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int64: return w.int(v) case *int64: + if v == nil { + return w.int(0) + } return w.int(*v) case uint: return w.uint(uint64(v)) case *uint: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint8: return w.uint(uint64(v)) case *uint8: + if v == nil { + return w.string("") + } return w.uint(uint64(*v)) case uint16: return w.uint(uint64(v)) case *uint16: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint32: return w.uint(uint64(v)) case *uint32: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint64: return w.uint(v) case *uint64: + if v == nil { + return w.uint(0) + } return w.uint(*v) case float32: return w.float(float64(v)) case *float32: + if v == nil { + return w.float(0) + } return w.float(float64(*v)) case float64: return w.float(v) case *float64: + if v == nil { + return w.float(0) + } return w.float(*v) case bool: if v { @@ -123,6 +162,9 @@ func (w *Writer) WriteArg(v interface{}) error { } return w.int(0) case *bool: + if v == nil { + return w.int(0) + } if *v { return w.int(1) } @@ -130,8 +172,19 @@ func (w *Writer) WriteArg(v interface{}) error { case time.Time: w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) return w.bytes(w.numBuf) + case *time.Time: + if v == nil { + v = &time.Time{} + } + w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) + return w.bytes(w.numBuf) case time.Duration: return w.int(v.Nanoseconds()) + case *time.Duration: + if v == nil { + return w.int(0) + } + return w.int(v.Nanoseconds()) case encoding.BinaryMarshaler: b, err := v.MarshalBinary() if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/internal/redis.go b/vendor/github.com/redis/go-redis/v9/internal/redis.go new file mode 100644 index 000000000..190bbebea --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/redis.go @@ -0,0 +1,3 @@ +package internal + +const RedisNull = "" diff --git a/vendor/github.com/redis/go-redis/v9/internal/semaphore.go b/vendor/github.com/redis/go-redis/v9/internal/semaphore.go new file mode 100644 index 000000000..a1dfca5ff --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/semaphore.go @@ 
-0,0 +1,193 @@ +package internal + +import ( + "context" + "sync" + "time" +) + +var semTimers = sync.Pool{ + New: func() interface{} { + t := time.NewTimer(time.Hour) + t.Stop() + return t + }, +} + +// FastSemaphore is a channel-based semaphore optimized for performance. +// It uses a fast path that avoids timer allocation when tokens are available. +// The channel is pre-filled with tokens: Acquire = receive, Release = send. +// Closing the semaphore unblocks all waiting goroutines. +// +// Performance: ~30 ns/op with zero allocations on fast path. +// Fairness: Eventual fairness (no starvation) but not strict FIFO. +type FastSemaphore struct { + tokens chan struct{} + max int32 +} + +// NewFastSemaphore creates a new fast semaphore with the given capacity. +func NewFastSemaphore(capacity int32) *FastSemaphore { + ch := make(chan struct{}, capacity) + // Pre-fill with tokens + for i := int32(0); i < capacity; i++ { + ch <- struct{}{} + } + return &FastSemaphore{ + tokens: ch, + max: capacity, + } +} + +// TryAcquire attempts to acquire a token without blocking. +// Returns true if successful, false if no tokens available. +func (s *FastSemaphore) TryAcquire() bool { + select { + case <-s.tokens: + return true + default: + return false + } +} + +// Acquire acquires a token, blocking if necessary until one is available. +// Returns an error if the context is cancelled or the timeout expires. +// Uses a fast path to avoid timer allocation when tokens are immediately available. +func (s *FastSemaphore) Acquire(ctx context.Context, timeout time.Duration, timeoutErr error) error { + // Check context first + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Try fast path first (no timer needed) + select { + case <-s.tokens: + return nil + default: + } + + // Slow path: need to wait with timeout + timer := semTimers.Get().(*time.Timer) + defer semTimers.Put(timer) + timer.Reset(timeout) + + select { + case <-s.tokens: + if !timer.Stop() { + <-timer.C + } + return nil + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return ctx.Err() + case <-timer.C: + return timeoutErr + } +} + +// AcquireBlocking acquires a token, blocking indefinitely until one is available. +func (s *FastSemaphore) AcquireBlocking() { + <-s.tokens +} + +// Release releases a token back to the semaphore. +func (s *FastSemaphore) Release() { + s.tokens <- struct{}{} +} + +// Close closes the semaphore, unblocking all waiting goroutines. +// After close, all Acquire calls will receive a closed channel signal. +func (s *FastSemaphore) Close() { + close(s.tokens) +} + +// Len returns the current number of acquired tokens. +func (s *FastSemaphore) Len() int32 { + return s.max - int32(len(s.tokens)) +} + +// FIFOSemaphore is a channel-based semaphore with strict FIFO ordering. +// Unlike FastSemaphore, this guarantees that threads are served in the exact order they call Acquire(). +// The channel is pre-filled with tokens: Acquire = receive, Release = send. +// Closing the semaphore unblocks all waiting goroutines. +// +// Performance: ~115 ns/op with zero allocations (slower than FastSemaphore due to timer allocation). +// Fairness: Strict FIFO ordering guaranteed by Go runtime. +type FIFOSemaphore struct { + tokens chan struct{} + max int32 +} + +// NewFIFOSemaphore creates a new FIFO semaphore with the given capacity. 
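+// A minimal usage sketch (errTimeout is a caller-defined error value):
+//
+//	sem := NewFIFOSemaphore(10)
+//	if err := sem.Acquire(ctx, time.Second, errTimeout); err != nil {
+//		return err
+//	}
+//	defer sem.Release()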
+func NewFIFOSemaphore(capacity int32) *FIFOSemaphore { + ch := make(chan struct{}, capacity) + // Pre-fill with tokens + for i := int32(0); i < capacity; i++ { + ch <- struct{}{} + } + return &FIFOSemaphore{ + tokens: ch, + max: capacity, + } +} + +// TryAcquire attempts to acquire a token without blocking. +// Returns true if successful, false if no tokens available. +func (s *FIFOSemaphore) TryAcquire() bool { + select { + case <-s.tokens: + return true + default: + return false + } +} + +// Acquire acquires a token, blocking if necessary until one is available. +// Returns an error if the context is cancelled or the timeout expires. +// Always uses timer to guarantee FIFO ordering (no fast path). +func (s *FIFOSemaphore) Acquire(ctx context.Context, timeout time.Duration, timeoutErr error) error { + // No fast path - always use timer to guarantee FIFO + timer := semTimers.Get().(*time.Timer) + defer semTimers.Put(timer) + timer.Reset(timeout) + + select { + case <-s.tokens: + if !timer.Stop() { + <-timer.C + } + return nil + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return ctx.Err() + case <-timer.C: + return timeoutErr + } +} + +// AcquireBlocking acquires a token, blocking indefinitely until one is available. +func (s *FIFOSemaphore) AcquireBlocking() { + <-s.tokens +} + +// Release releases a token back to the semaphore. +func (s *FIFOSemaphore) Release() { + s.tokens <- struct{}{} +} + +// Close closes the semaphore, unblocking all waiting goroutines. +// After close, all Acquire calls will receive a closed channel signal. +func (s *FIFOSemaphore) Close() { + close(s.tokens) +} + +// Len returns the current number of acquired tokens. +func (s *FIFOSemaphore) Len() int32 { + return s.max - int32(len(s.tokens)) +} \ No newline at end of file diff --git a/vendor/github.com/redis/go-redis/v9/internal/util.go b/vendor/github.com/redis/go-redis/v9/internal/util.go index 235a91afa..f77775ff4 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/util.go +++ b/vendor/github.com/redis/go-redis/v9/internal/util.go @@ -3,6 +3,7 @@ package internal import ( "context" "net" + "strconv" "strings" "time" @@ -48,22 +49,7 @@ func isLower(s string) bool { } func ReplaceSpaces(s string) string { - // Pre-allocate a builder with the same length as s to minimize allocations. - // This is a basic optimization; adjust the initial size based on your use case. - var builder strings.Builder - builder.Grow(len(s)) - - for _, char := range s { - if char == ' ' { - // Replace space with a hyphen. - builder.WriteRune('-') - } else { - // Copy the character as-is. 
-			builder.WriteRune(char)
-		}
-	}
-
-	return builder.String()
+	return strings.ReplaceAll(s, " ", "-")
 }
 
 func GetAddr(addr string) string {
@@ -81,3 +67,47 @@ func GetAddr(addr string) string {
 	}
 	return net.JoinHostPort(addr[:ind], addr[ind+1:])
 }
+
+func ToInteger(val interface{}) int {
+	switch v := val.(type) {
+	case int:
+		return v
+	case int64:
+		return int(v)
+	case string:
+		i, _ := strconv.Atoi(v)
+		return i
+	default:
+		return 0
+	}
+}
+
+func ToFloat(val interface{}) float64 {
+	switch v := val.(type) {
+	case float64:
+		return v
+	case string:
+		f, _ := strconv.ParseFloat(v, 64)
+		return f
+	default:
+		return 0.0
+	}
+}
+
+func ToString(val interface{}) string {
+	if str, ok := val.(string); ok {
+		return str
+	}
+	return ""
+}
+
+func ToStringSlice(val interface{}) []string {
+	if arr, ok := val.([]interface{}); ok {
+		result := make([]string, len(arr))
+		for i, v := range arr {
+			result[i] = ToString(v)
+		}
+		return result
+	}
+	return nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/convert.go b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go
new file mode 100644
index 000000000..b743a4f0e
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go
@@ -0,0 +1,41 @@
+package util
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// ParseStringToFloat parses a Redis RESP3 float reply into a Go float64,
+// handling "inf", "-inf", "nan" per Redis conventions.
+func ParseStringToFloat(s string) (float64, error) {
+	switch s {
+	case "inf":
+		return math.Inf(1), nil
+	case "-inf":
+		return math.Inf(-1), nil
+	case "nan", "-nan":
+		return math.NaN(), nil
+	}
+	return strconv.ParseFloat(s, 64)
+}
+
+// MustParseFloat is like ParseStringToFloat but panics on parse errors.
+func MustParseFloat(s string) float64 {
+	f, err := ParseStringToFloat(s)
+	if err != nil {
+		panic(fmt.Sprintf("redis: failed to parse float %q: %v", s, err))
+	}
+	return f
+}
+
+// SafeIntToInt32 safely converts an int to int32, returning an error if overflow would occur.
+func SafeIntToInt32(value int, fieldName string) (int32, error) {
+	if value > math.MaxInt32 {
+		return 0, fmt.Errorf("redis: %s value %d exceeds maximum allowed value %d", fieldName, value, math.MaxInt32)
+	}
+	if value < math.MinInt32 {
+		return 0, fmt.Errorf("redis: %s value %d is below minimum allowed value %d", fieldName, value, math.MinInt32)
+	}
+	return int32(value), nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/math.go b/vendor/github.com/redis/go-redis/v9/internal/util/math.go
new file mode 100644
index 000000000..e707c47a6
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/math.go
@@ -0,0 +1,17 @@
+package util
+
+// Max returns the maximum of two integers
+func Max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+// Min returns the minimum of two integers
+func Min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/redis/go-redis/v9/json.go b/vendor/github.com/redis/go-redis/v9/json.go
index ca731db3a..2b9fa527e 100644
--- a/vendor/github.com/redis/go-redis/v9/json.go
+++ b/vendor/github.com/redis/go-redis/v9/json.go
@@ -60,7 +60,7 @@ type JSONArrTrimArgs struct {
 type JSONCmd struct {
 	baseCmd
 	val      string
-	expanded []interface{}
+	expanded interface{}
 }
 
 var _ Cmder = (*JSONCmd)(nil)
@@ -82,6 +82,7 @@ func (cmd *JSONCmd) SetVal(val string) {
 	cmd.val = val
 }
 
+// Val returns the result of the JSON.GET command as a string.
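+// For example (illustrative; key and path are assumed to exist):
+//
+//	cmd := client.JSONGet(ctx, "doc:1", "$")
+//	s := cmd.Val() // JSON-encoded string, e.g. `[{"name":"Alice"}]`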
func (cmd *JSONCmd) Val() string { if len(cmd.val) == 0 && cmd.expanded != nil { val, err := json.Marshal(cmd.expanded) @@ -100,11 +101,12 @@ func (cmd *JSONCmd) Result() (string, error) { return cmd.Val(), cmd.Err() } -func (cmd JSONCmd) Expanded() (interface{}, error) { +// Expanded returns the result of the JSON.GET command as unmarshalled JSON. +func (cmd *JSONCmd) Expanded() (interface{}, error) { if len(cmd.val) != 0 && cmd.expanded == nil { err := json.Unmarshal([]byte(cmd.val), &cmd.expanded) if err != nil { - return "", err + return nil, err } } @@ -113,11 +115,17 @@ func (cmd JSONCmd) Expanded() (interface{}, error) { func (cmd *JSONCmd) readReply(rd *proto.Reader) error { // nil response from JSON.(M)GET (cmd.baseCmd.err will be "redis: nil") + // This happens when the key doesn't exist if cmd.baseCmd.Err() == Nil { cmd.val = "" return Nil } + // Handle other base command errors + if cmd.baseCmd.Err() != nil { + return cmd.baseCmd.Err() + } + if readType, err := rd.PeekReplyType(); err != nil { return err } else if readType == proto.RespArray { @@ -127,6 +135,13 @@ func (cmd *JSONCmd) readReply(rd *proto.Reader) error { return err } + // Empty array means no results found for JSON path, but key exists + // This should return "[]", not an error + if size == 0 { + cmd.val = "[]" + return nil + } + expanded := make([]interface{}, size) for i := 0; i < size; i++ { @@ -141,6 +156,7 @@ func (cmd *JSONCmd) readReply(rd *proto.Reader) error { return err } else if str == "" || err == Nil { cmd.val = "" + return Nil } else { cmd.val = str } @@ -494,7 +510,7 @@ func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd } // JSONNumIncrBy increments the number value stored at the specified path by the provided number. -// For more information, see https://redis.io/commands/json.numincreby +// For more information, see https://redis.io/docs/latest/commands/json.numincrby/ func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd { args := []interface{}{"JSON.NUMINCRBY", key, path, value} cmd := newJSONCmd(ctx, args...) diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md b/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md new file mode 100644 index 000000000..caa4f705e --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md @@ -0,0 +1,218 @@ +# Maintenance Notifications - FEATURES + +## Overview + +The Maintenance Notifications feature enables seamless Redis connection handoffs during cluster maintenance operations without dropping active connections. This feature leverages Redis RESP3 push notifications to provide zero-downtime maintenance for Redis Enterprise and compatible Redis deployments. + +## Important + +Using Maintenance Notifications may affect the read and write timeouts by relaxing them during maintenance operations. +This is necessary to prevent false failures due to increased latency during handoffs. The relaxed timeouts are automatically applied and removed as needed. 
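+
+For example, a deployment that needs a shorter relaxed window can override the
+default explicitly (a sketch; these `Config` fields are documented later in this
+document):
+
+```go
+client := redis.NewClient(&redis.Options{
+    Addr:     "localhost:6379",
+    Protocol: 3, // RESP3 required for push notifications
+    MaintNotificationsConfig: &maintnotifications.Config{
+        Mode:           maintnotifications.ModeAuto,
+        RelaxedTimeout: 5 * time.Second, // default: 10s
+    },
+})
+```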
+ +## Key Features + +### Seamless Connection Handoffs +- **Zero-Downtime Maintenance**: Automatically handles connection transitions during cluster operations +- **Active Operation Preservation**: Transfers in-flight operations to new connections without interruption +- **Graceful Degradation**: Falls back to standard reconnection if handoff fails + +### Push Notification Support +Supports all Redis Enterprise maintenance notification types: +- **MOVING** - Slot moving to a new node +- **MIGRATING** - Slot in migration state +- **MIGRATED** - Migration completed +- **FAILING_OVER** - Node failing over +- **FAILED_OVER** - Failover completed + +### Circuit Breaker Pattern +- **Endpoint-Specific Failure Tracking**: Prevents repeated connection attempts to failing endpoints +- **Automatic Recovery Testing**: Half-open state allows gradual recovery validation +- **Configurable Thresholds**: Customize failure thresholds and reset timeouts + +### Flexible Configuration +- **Auto-Detection Mode**: Automatically detects server support for maintenance notifications +- **Multiple Endpoint Types**: Support for internal/external IP/FQDN endpoint resolution +- **Auto-Scaling Workers**: Automatically sizes worker pool based on connection pool size +- **Timeout Management**: Separate timeouts for relaxed (during maintenance) and normal operations + +### Extensible Hook System +- **Pre/Post Processing Hooks**: Monitor and customize notification handling +- **Built-in Hooks**: Logging and metrics collection hooks included +- **Custom Hook Support**: Implement custom business logic around maintenance events + +### Comprehensive Monitoring +- **Metrics Collection**: Track notification counts, processing times, and error rates +- **Circuit Breaker Stats**: Monitor endpoint health and circuit breaker states +- **Operation Tracking**: Track active handoff operations and their lifecycle + +## Architecture Highlights + +### Event-Driven Handoff System +- **Asynchronous Processing**: Non-blocking handoff operations using worker pool pattern +- **Queue-Based Architecture**: Configurable queue size with auto-scaling support +- **Retry Mechanism**: Configurable retry attempts with exponential backoff + +### Connection Pool Integration +- **Pool Hook Interface**: Seamless integration with go-redis connection pool +- **Connection State Management**: Atomic flags for connection usability tracking +- **Graceful Shutdown**: Ensures all in-flight handoffs complete before shutdown + +### Thread-Safe Design +- **Lock-Free Operations**: Atomic operations for high-performance state tracking +- **Concurrent-Safe Maps**: sync.Map for tracking active operations +- **Minimal Lock Contention**: Read-write locks only where necessary + +## Configuration Options + +### Operation Modes +- **`ModeDisabled`**: Maintenance notifications completely disabled +- **`ModeEnabled`**: Forcefully enabled (fails if server doesn't support) +- **`ModeAuto`**: Auto-detect server support (recommended default) + +### Endpoint Types +- **`EndpointTypeAuto`**: Auto-detect based on current connection +- **`EndpointTypeInternalIP`**: Use internal IP addresses +- **`EndpointTypeInternalFQDN`**: Use internal fully qualified domain names +- **`EndpointTypeExternalIP`**: Use external IP addresses +- **`EndpointTypeExternalFQDN`**: Use external fully qualified domain names +- **`EndpointTypeNone`**: No endpoint (reconnect with current configuration) + +### Timeout Configuration +- **`RelaxedTimeout`**: Extended timeout during maintenance operations (default: 
10s)
+- **`HandoffTimeout`**: Maximum time for handoff completion (default: 15s)
+- **`PostHandoffRelaxedDuration`**: Relaxed period after handoff (default: 2×RelaxedTimeout)
+
+### Worker Pool Configuration
+- **`MaxWorkers`**: Maximum concurrent handoff workers (auto-calculated if 0)
+- **`HandoffQueueSize`**: Handoff queue capacity (auto-calculated if 0)
+- **`MaxHandoffRetries`**: Maximum retry attempts for failed handoffs (default: 3)
+
+### Circuit Breaker Configuration
+- **`CircuitBreakerFailureThreshold`**: Failures before opening circuit (default: 5)
+- **`CircuitBreakerResetTimeout`**: Time before testing recovery (default: 60s)
+- **`CircuitBreakerMaxRequests`**: Max requests in half-open state (default: 3)
+
+## Auto-Scaling Formulas
+
+### Worker Pool Sizing
+When `MaxWorkers = 0` (auto-calculate):
+```
+MaxWorkers = min(PoolSize/2, max(10, PoolSize/3))
+```
+
+### Queue Sizing
+When `HandoffQueueSize = 0` (auto-calculate):
+```
+QueueSize = max(20 × MaxWorkers, PoolSize)
+Capped by: MaxActiveConns + 1 (if set), otherwise 5 × PoolSize
+```
+
+### Examples
+- **Pool Size 100**: 33 workers, 660 queue (capped at 500)
+- **Pool Size 100 + MaxActiveConns 150**: 33 workers, 151 queue
+- **Pool Size 50**: 16 workers, 320 queue (capped at 250)
+
+## Performance Characteristics
+
+### Throughput
+- **Non-Blocking Handoffs**: Client operations continue during handoffs
+- **Concurrent Processing**: Multiple handoffs processed in parallel
+- **Minimal Overhead**: Lock-free atomic operations for state tracking
+
+### Latency
+- **Relaxed Timeouts**: Extended timeouts during maintenance prevent false failures
+- **Fast Path**: Connections not undergoing handoff have zero overhead
+- **Graceful Degradation**: Failed handoffs fall back to standard reconnection
+
+### Resource Usage
+- **Memory Efficient**: Bounded queue sizes prevent memory exhaustion
+- **Worker Pool**: Fixed worker count prevents goroutine explosion
+- **Connection Reuse**: Handoff reuses existing connection objects
+
+## Testing
+
+### Unit Tests
+- Comprehensive unit test coverage for all components
+- Mock-based testing for isolation
+- Concurrent operation testing
+
+### Integration Tests
+- Pool integration tests with real connection handoffs
+- Circuit breaker behavior validation
+- Hook system integration testing
+
+### E2E Tests
+- Real Redis Enterprise cluster testing
+- Multiple scenario coverage (timeouts, endpoint types, stress tests)
+- Fault injection testing
+- TLS configuration testing
+
+## Compatibility
+
+### Requirements
+- **Redis Protocol**: RESP3 required for push notifications
+- **Redis Version**: Redis Enterprise or compatible Redis with maintenance notifications
+- **Go Version**: Go 1.18+ (uses generics and atomic types)
+
+### Client Support
+#### Currently Supported
+- **Standalone Client** (`redis.NewClient`)
+
+#### Planned Support
+- **Cluster Client** (not yet supported)
+
+#### Will Not Support
+- **Failover Client** (no planned support)
+- **Ring Client** (no planned support)
+
+## Migration Guide
+
+### Enabling Maintenance Notifications
+
+**Before:**
+```go
+client := redis.NewClient(&redis.Options{
+    Addr:     "localhost:6379",
+    Protocol: 2, // RESP2
+})
+```
+
+**After:**
+```go
+client := redis.NewClient(&redis.Options{
+    Addr:     "localhost:6379",
+    Protocol: 3, // RESP3 required
+    MaintNotificationsConfig: &maintnotifications.Config{
+        Mode: maintnotifications.ModeAuto,
+    },
+})
+```
+
+### Adding Monitoring
+
+```go
+// Get the manager from the client
+manager :=
client.GetMaintNotificationsManager() +if manager != nil { + // Add logging hook + loggingHook := maintnotifications.NewLoggingHook(2) // Info level + manager.AddNotificationHook(loggingHook) + + // Add metrics hook + metricsHook := maintnotifications.NewMetricsHook() + manager.AddNotificationHook(metricsHook) +} +``` + +## Known Limitations + +1. **Standalone Only**: Currently only supported in standalone Redis clients +2. **RESP3 Required**: Push notifications require RESP3 protocol +3. **Server Support**: Requires Redis Enterprise or compatible Redis with maintenance notifications +4. **Single Connection Commands**: Some commands (MULTI/EXEC, WATCH) may need special handling +5. **No Failover/Ring Client Support**: Failover and Ring clients are not supported and there are no plans to add support + +## Future Enhancements + +- Cluster client support +- Enhanced metrics and observability \ No newline at end of file diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md b/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md new file mode 100644 index 000000000..2ac6b9cb1 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md @@ -0,0 +1,67 @@ +# Maintenance Notifications + +Seamless Redis connection handoffs during cluster maintenance operations without dropping connections. + +## ⚠️ **Important Note** +**Maintenance notifications are currently supported only in standalone Redis clients.** Cluster clients (ClusterClient, FailoverClient, etc.) do not yet support this functionality. + +## Quick Start + +```go +client := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Protocol: 3, // RESP3 required + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeEnabled, + }, +}) +``` + +## Modes + +- **`ModeDisabled`** - Maintenance notifications disabled +- **`ModeEnabled`** - Forcefully enabled (fails if server doesn't support) +- **`ModeAuto`** - Auto-detect server support (default) + +## Configuration + +```go +&maintnotifications.Config{ + Mode: maintnotifications.ModeAuto, + EndpointType: maintnotifications.EndpointTypeAuto, + RelaxedTimeout: 10 * time.Second, + HandoffTimeout: 15 * time.Second, + MaxHandoffRetries: 3, + MaxWorkers: 0, // Auto-calculated + HandoffQueueSize: 0, // Auto-calculated + PostHandoffRelaxedDuration: 0, // 2 * RelaxedTimeout +} +``` + +### Endpoint Types + +- **`EndpointTypeAuto`** - Auto-detect based on connection (default) +- **`EndpointTypeInternalIP`** - Internal IP address +- **`EndpointTypeInternalFQDN`** - Internal FQDN +- **`EndpointTypeExternalIP`** - External IP address +- **`EndpointTypeExternalFQDN`** - External FQDN +- **`EndpointTypeNone`** - No endpoint (reconnect with current config) + +### Auto-Scaling + +**Workers**: `min(PoolSize/2, max(10, PoolSize/3))` when auto-calculated +**Queue**: `max(20×Workers, PoolSize)` capped by `MaxActiveConns+1` or `5×PoolSize` + +**Examples:** +- Pool 100: 33 workers, 660 queue (capped at 500) +- Pool 100 + MaxActiveConns 150: 33 workers, 151 queue + +## How It Works + +1. Redis sends push notifications about cluster maintenance operations +2. Client creates new connections to updated endpoints +3. Active operations transfer to new connections +4. 
Old connections close gracefully + + +## For more information, see [FEATURES](FEATURES.md) diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go new file mode 100644 index 000000000..cb76b6447 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go @@ -0,0 +1,353 @@ +package maintnotifications + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" +) + +// CircuitBreakerState represents the state of a circuit breaker +type CircuitBreakerState int32 + +const ( + // CircuitBreakerClosed - normal operation, requests allowed + CircuitBreakerClosed CircuitBreakerState = iota + // CircuitBreakerOpen - failing fast, requests rejected + CircuitBreakerOpen + // CircuitBreakerHalfOpen - testing if service recovered + CircuitBreakerHalfOpen +) + +func (s CircuitBreakerState) String() string { + switch s { + case CircuitBreakerClosed: + return "closed" + case CircuitBreakerOpen: + return "open" + case CircuitBreakerHalfOpen: + return "half-open" + default: + return "unknown" + } +} + +// CircuitBreaker implements the circuit breaker pattern for endpoint-specific failure handling +type CircuitBreaker struct { + // Configuration + failureThreshold int // Number of failures before opening + resetTimeout time.Duration // How long to stay open before testing + maxRequests int // Max requests allowed in half-open state + + // State tracking (atomic for lock-free access) + state atomic.Int32 // CircuitBreakerState + failures atomic.Int64 // Current failure count + successes atomic.Int64 // Success count in half-open state + requests atomic.Int64 // Request count in half-open state + lastFailureTime atomic.Int64 // Unix timestamp of last failure + lastSuccessTime atomic.Int64 // Unix timestamp of last success + + // Endpoint identification + endpoint string + config *Config +} + +// newCircuitBreaker creates a new circuit breaker for an endpoint +func newCircuitBreaker(endpoint string, config *Config) *CircuitBreaker { + // Use configuration values with sensible defaults + failureThreshold := 5 + resetTimeout := 60 * time.Second + maxRequests := 3 + + if config != nil { + failureThreshold = config.CircuitBreakerFailureThreshold + resetTimeout = config.CircuitBreakerResetTimeout + maxRequests = config.CircuitBreakerMaxRequests + } + + return &CircuitBreaker{ + failureThreshold: failureThreshold, + resetTimeout: resetTimeout, + maxRequests: maxRequests, + endpoint: endpoint, + config: config, + state: atomic.Int32{}, // Defaults to CircuitBreakerClosed (0) + } +} + +// IsOpen returns true if the circuit breaker is open (rejecting requests) +func (cb *CircuitBreaker) IsOpen() bool { + state := CircuitBreakerState(cb.state.Load()) + return state == CircuitBreakerOpen +} + +// shouldAttemptReset checks if enough time has passed to attempt reset +func (cb *CircuitBreaker) shouldAttemptReset() bool { + lastFailure := time.Unix(cb.lastFailureTime.Load(), 0) + return time.Since(lastFailure) >= cb.resetTimeout +} + +// Execute runs the given function with circuit breaker protection +func (cb *CircuitBreaker) Execute(fn func() error) error { + // Single atomic state load for consistency + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerOpen: + if cb.shouldAttemptReset() { + // Attempt transition to half-open + if 
cb.state.CompareAndSwap(int32(CircuitBreakerOpen), int32(CircuitBreakerHalfOpen)) { + cb.requests.Store(0) + cb.successes.Store(0) + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerTransitioningToHalfOpen(cb.endpoint)) + } + // Fall through to half-open logic + } else { + return ErrCircuitBreakerOpen + } + } else { + return ErrCircuitBreakerOpen + } + fallthrough + case CircuitBreakerHalfOpen: + requests := cb.requests.Add(1) + if requests > int64(cb.maxRequests) { + cb.requests.Add(-1) // Revert the increment + return ErrCircuitBreakerOpen + } + } + + // Execute the function with consistent state + err := fn() + + if err != nil { + cb.recordFailure() + return err + } + + cb.recordSuccess() + return nil +} + +// recordFailure records a failure and potentially opens the circuit +func (cb *CircuitBreaker) recordFailure() { + cb.lastFailureTime.Store(time.Now().Unix()) + failures := cb.failures.Add(1) + + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerClosed: + if failures >= int64(cb.failureThreshold) { + if cb.state.CompareAndSwap(int32(CircuitBreakerClosed), int32(CircuitBreakerOpen)) { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerOpened(cb.endpoint, failures)) + } + } + } + case CircuitBreakerHalfOpen: + // Any failure in half-open state immediately opens the circuit + if cb.state.CompareAndSwap(int32(CircuitBreakerHalfOpen), int32(CircuitBreakerOpen)) { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerReopened(cb.endpoint)) + } + } + } +} + +// recordSuccess records a success and potentially closes the circuit +func (cb *CircuitBreaker) recordSuccess() { + cb.lastSuccessTime.Store(time.Now().Unix()) + + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerClosed: + // Reset failure count on success in closed state + cb.failures.Store(0) + case CircuitBreakerHalfOpen: + successes := cb.successes.Add(1) + + // If we've had enough successful requests, close the circuit + if successes >= int64(cb.maxRequests) { + if cb.state.CompareAndSwap(int32(CircuitBreakerHalfOpen), int32(CircuitBreakerClosed)) { + cb.failures.Store(0) + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerClosed(cb.endpoint, successes)) + } + } + } + } +} + +// GetState returns the current state of the circuit breaker +func (cb *CircuitBreaker) GetState() CircuitBreakerState { + return CircuitBreakerState(cb.state.Load()) +} + +// GetStats returns current statistics for monitoring +func (cb *CircuitBreaker) GetStats() CircuitBreakerStats { + return CircuitBreakerStats{ + Endpoint: cb.endpoint, + State: cb.GetState(), + Failures: cb.failures.Load(), + Successes: cb.successes.Load(), + Requests: cb.requests.Load(), + LastFailureTime: time.Unix(cb.lastFailureTime.Load(), 0), + LastSuccessTime: time.Unix(cb.lastSuccessTime.Load(), 0), + } +} + +// CircuitBreakerStats provides statistics about a circuit breaker +type CircuitBreakerStats struct { + Endpoint string + State CircuitBreakerState + Failures int64 + Successes int64 + Requests int64 + LastFailureTime time.Time + LastSuccessTime time.Time +} + +// CircuitBreakerEntry wraps a circuit breaker with access tracking +type CircuitBreakerEntry struct { + breaker *CircuitBreaker + lastAccess atomic.Int64 // Unix timestamp + created time.Time +} + +// CircuitBreakerManager manages circuit 
breakers for multiple endpoints +type CircuitBreakerManager struct { + breakers sync.Map // map[string]*CircuitBreakerEntry + config *Config + cleanupStop chan struct{} + cleanupMu sync.Mutex + lastCleanup atomic.Int64 // Unix timestamp +} + +// newCircuitBreakerManager creates a new circuit breaker manager +func newCircuitBreakerManager(config *Config) *CircuitBreakerManager { + cbm := &CircuitBreakerManager{ + config: config, + cleanupStop: make(chan struct{}), + } + cbm.lastCleanup.Store(time.Now().Unix()) + + // Start background cleanup goroutine + go cbm.cleanupLoop() + + return cbm +} + +// GetCircuitBreaker returns the circuit breaker for an endpoint, creating it if necessary +func (cbm *CircuitBreakerManager) GetCircuitBreaker(endpoint string) *CircuitBreaker { + now := time.Now().Unix() + + if entry, ok := cbm.breakers.Load(endpoint); ok { + cbEntry := entry.(*CircuitBreakerEntry) + cbEntry.lastAccess.Store(now) + return cbEntry.breaker + } + + // Create new circuit breaker with metadata + newBreaker := newCircuitBreaker(endpoint, cbm.config) + newEntry := &CircuitBreakerEntry{ + breaker: newBreaker, + created: time.Now(), + } + newEntry.lastAccess.Store(now) + + actual, _ := cbm.breakers.LoadOrStore(endpoint, newEntry) + return actual.(*CircuitBreakerEntry).breaker +} + +// GetAllStats returns statistics for all circuit breakers +func (cbm *CircuitBreakerManager) GetAllStats() []CircuitBreakerStats { + var stats []CircuitBreakerStats + cbm.breakers.Range(func(key, value interface{}) bool { + entry := value.(*CircuitBreakerEntry) + stats = append(stats, entry.breaker.GetStats()) + return true + }) + return stats +} + +// cleanupLoop runs background cleanup of unused circuit breakers +func (cbm *CircuitBreakerManager) cleanupLoop() { + ticker := time.NewTicker(5 * time.Minute) // Cleanup every 5 minutes + defer ticker.Stop() + + for { + select { + case <-ticker.C: + cbm.cleanup() + case <-cbm.cleanupStop: + return + } + } +} + +// cleanup removes circuit breakers that haven't been accessed recently +func (cbm *CircuitBreakerManager) cleanup() { + // Prevent concurrent cleanups + if !cbm.cleanupMu.TryLock() { + return + } + defer cbm.cleanupMu.Unlock() + + now := time.Now() + cutoff := now.Add(-30 * time.Minute).Unix() // 30 minute TTL + + var toDelete []string + count := 0 + + cbm.breakers.Range(func(key, value interface{}) bool { + endpoint := key.(string) + entry := value.(*CircuitBreakerEntry) + + count++ + + // Remove if not accessed recently + if entry.lastAccess.Load() < cutoff { + toDelete = append(toDelete, endpoint) + } + + return true + }) + + // Delete expired entries + for _, endpoint := range toDelete { + cbm.breakers.Delete(endpoint) + } + + // Log cleanup results + if len(toDelete) > 0 && internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerCleanup(len(toDelete), count)) + } + + cbm.lastCleanup.Store(now.Unix()) +} + +// Shutdown stops the cleanup goroutine +func (cbm *CircuitBreakerManager) Shutdown() { + close(cbm.cleanupStop) +} + +// Reset resets all circuit breakers (useful for testing) +func (cbm *CircuitBreakerManager) Reset() { + cbm.breakers.Range(func(key, value interface{}) bool { + entry := value.(*CircuitBreakerEntry) + breaker := entry.breaker + breaker.state.Store(int32(CircuitBreakerClosed)) + breaker.failures.Store(0) + breaker.successes.Store(0) + breaker.requests.Store(0) + breaker.lastFailureTime.Store(0) + breaker.lastSuccessTime.Store(0) + return true + }) +} diff --git 
a/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go new file mode 100644 index 000000000..cbf4f6b22 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go @@ -0,0 +1,458 @@ +package maintnotifications + +import ( + "context" + "net" + "runtime" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/util" +) + +// Mode represents the maintenance notifications mode +type Mode string + +// Constants for maintenance push notifications modes +const ( + ModeDisabled Mode = "disabled" // Client doesn't send CLIENT MAINT_NOTIFICATIONS ON command + ModeEnabled Mode = "enabled" // Client forcefully sends command, interrupts connection on error + ModeAuto Mode = "auto" // Client tries to send command, disables feature on error +) + +// IsValid returns true if the maintenance notifications mode is valid +func (m Mode) IsValid() bool { + switch m { + case ModeDisabled, ModeEnabled, ModeAuto: + return true + default: + return false + } +} + +// String returns the string representation of the mode +func (m Mode) String() string { + return string(m) +} + +// EndpointType represents the type of endpoint to request in MOVING notifications +type EndpointType string + +// Constants for endpoint types +const ( + EndpointTypeAuto EndpointType = "auto" // Auto-detect based on connection + EndpointTypeInternalIP EndpointType = "internal-ip" // Internal IP address + EndpointTypeInternalFQDN EndpointType = "internal-fqdn" // Internal FQDN + EndpointTypeExternalIP EndpointType = "external-ip" // External IP address + EndpointTypeExternalFQDN EndpointType = "external-fqdn" // External FQDN + EndpointTypeNone EndpointType = "none" // No endpoint (reconnect with current config) +) + +// IsValid returns true if the endpoint type is valid +func (e EndpointType) IsValid() bool { + switch e { + case EndpointTypeAuto, EndpointTypeInternalIP, EndpointTypeInternalFQDN, + EndpointTypeExternalIP, EndpointTypeExternalFQDN, EndpointTypeNone: + return true + default: + return false + } +} + +// String returns the string representation of the endpoint type +func (e EndpointType) String() string { + return string(e) +} + +// Config provides configuration options for maintenance notifications +type Config struct { + // Mode controls how client maintenance notifications are handled. + // Valid values: ModeDisabled, ModeEnabled, ModeAuto + // Default: ModeAuto + Mode Mode + + // EndpointType specifies the type of endpoint to request in MOVING notifications. + // Valid values: EndpointTypeAuto, EndpointTypeInternalIP, EndpointTypeInternalFQDN, + // EndpointTypeExternalIP, EndpointTypeExternalFQDN, EndpointTypeNone + // Default: EndpointTypeAuto + EndpointType EndpointType + + // RelaxedTimeout is the concrete timeout value to use during + // MIGRATING/FAILING_OVER states to accommodate increased latency. + // This applies to both read and write timeouts. + // Default: 10 seconds + RelaxedTimeout time.Duration + + // HandoffTimeout is the maximum time to wait for connection handoff to complete. + // If handoff takes longer than this, the old connection will be forcibly closed. + // Default: 15 seconds (matches server-side eviction timeout) + HandoffTimeout time.Duration + + // MaxWorkers is the maximum number of worker goroutines for processing handoff requests. 
+	// Workers are created on-demand and automatically cleaned up when idle.
+	// If zero, defaults to min(PoolSize/2, max(10, PoolSize/3)) to handle bursts effectively.
+	// If explicitly set, a minimum of PoolSize/2 is enforced.
+	//
+	// Default: min(PoolSize/2, max(10, PoolSize/3)), Minimum when set: PoolSize/2
+	MaxWorkers int
+
+	// HandoffQueueSize is the size of the buffered channel used to queue handoff requests.
+	// If the queue is full, new handoff requests will be rejected.
+	// Scales with both worker count and pool size for better burst handling.
+	//
+	// Default: max(20×MaxWorkers, PoolSize), capped by MaxActiveConns+1 (if set) or 5×PoolSize
+	// When set: minimum 200, capped by MaxActiveConns+1 (if set) or 5×PoolSize
+	HandoffQueueSize int
+
+	// PostHandoffRelaxedDuration is how long to keep relaxed timeouts on the new connection
+	// after a handoff completes. This provides additional resilience during cluster transitions.
+	// Default: 2 * RelaxedTimeout
+	PostHandoffRelaxedDuration time.Duration
+
+	// Circuit breaker configuration for endpoint failure handling
+	// CircuitBreakerFailureThreshold is the number of failures before opening the circuit.
+	// Default: 5
+	CircuitBreakerFailureThreshold int
+
+	// CircuitBreakerResetTimeout is how long to wait before testing if the endpoint recovered.
+	// Default: 60 seconds
+	CircuitBreakerResetTimeout time.Duration
+
+	// CircuitBreakerMaxRequests is the maximum number of requests allowed in half-open state.
+	// Default: 3
+	CircuitBreakerMaxRequests int
+
+	// MaxHandoffRetries is the maximum number of times to retry a failed handoff.
+	// After this many retries, the connection will be removed from the pool.
+	// Default: 3
+	MaxHandoffRetries int
+}
+
+func (c *Config) IsEnabled() bool {
+	return c != nil && c.Mode != ModeDisabled
+}
+
+// DefaultConfig returns a Config with sensible defaults.
+func DefaultConfig() *Config {
+	return &Config{
+		Mode:                       ModeAuto,         // Enable by default for Redis Cloud
+		EndpointType:               EndpointTypeAuto, // Auto-detect based on connection
+		RelaxedTimeout:             10 * time.Second,
+		HandoffTimeout:             15 * time.Second,
+		MaxWorkers:                 0, // Auto-calculated based on pool size
+		HandoffQueueSize:           0, // Auto-calculated based on max workers
+		PostHandoffRelaxedDuration: 0, // Auto-calculated based on relaxed timeout
+
+		// Circuit breaker configuration
+		CircuitBreakerFailureThreshold: 5,
+		CircuitBreakerResetTimeout:     60 * time.Second,
+		CircuitBreakerMaxRequests:      3,
+
+		// Connection Handoff Configuration
+		MaxHandoffRetries: 3,
+	}
+}
+
+// Validate checks if the configuration is valid.
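+// Typical call site (illustrative):
+//
+//	cfg := userCfg.ApplyDefaults()
+//	if err := cfg.Validate(); err != nil {
+//		return err
+//	}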
+func (c *Config) Validate() error { + if c.RelaxedTimeout <= 0 { + return ErrInvalidRelaxedTimeout + } + if c.HandoffTimeout <= 0 { + return ErrInvalidHandoffTimeout + } + // Validate worker configuration + // Allow 0 for auto-calculation, but negative values are invalid + if c.MaxWorkers < 0 { + return ErrInvalidHandoffWorkers + } + // HandoffQueueSize validation - allow 0 for auto-calculation + if c.HandoffQueueSize < 0 { + return ErrInvalidHandoffQueueSize + } + if c.PostHandoffRelaxedDuration < 0 { + return ErrInvalidPostHandoffRelaxedDuration + } + + // Circuit breaker validation + if c.CircuitBreakerFailureThreshold < 1 { + return ErrInvalidCircuitBreakerFailureThreshold + } + if c.CircuitBreakerResetTimeout < 0 { + return ErrInvalidCircuitBreakerResetTimeout + } + if c.CircuitBreakerMaxRequests < 1 { + return ErrInvalidCircuitBreakerMaxRequests + } + + // Validate Mode (maintenance notifications mode) + if !c.Mode.IsValid() { + return ErrInvalidMaintNotifications + } + + // Validate EndpointType + if !c.EndpointType.IsValid() { + return ErrInvalidEndpointType + } + + // Validate configuration fields + if c.MaxHandoffRetries < 1 || c.MaxHandoffRetries > 10 { + return ErrInvalidHandoffRetries + } + + return nil +} + +// ApplyDefaults applies default values to any zero-value fields in the configuration. +// This ensures that partially configured structs get sensible defaults for missing fields. +func (c *Config) ApplyDefaults() *Config { + return c.ApplyDefaultsWithPoolSize(0) +} + +// ApplyDefaultsWithPoolSize applies default values to any zero-value fields in the configuration, +// using the provided pool size to calculate worker defaults. +// This ensures that partially configured structs get sensible defaults for missing fields. +func (c *Config) ApplyDefaultsWithPoolSize(poolSize int) *Config { + return c.ApplyDefaultsWithPoolConfig(poolSize, 0) +} + +// ApplyDefaultsWithPoolConfig applies default values to any zero-value fields in the configuration, +// using the provided pool size and max active connections to calculate worker and queue defaults. +// This ensures that partially configured structs get sensible defaults for missing fields. 
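+// Worked example: with poolSize=100 and maxActiveConns=0,
+// MaxWorkers = min(100/2, max(10, 100/3)) = 33 and
+// HandoffQueueSize = max(20*33, 100) = 660, capped at 5*100 = 500.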
+func (c *Config) ApplyDefaultsWithPoolConfig(poolSize int, maxActiveConns int) *Config { + if c == nil { + return DefaultConfig().ApplyDefaultsWithPoolSize(poolSize) + } + + defaults := DefaultConfig() + result := &Config{} + + // Apply defaults for enum fields (empty/zero means not set) + result.Mode = defaults.Mode + if c.Mode != "" { + result.Mode = c.Mode + } + + result.EndpointType = defaults.EndpointType + if c.EndpointType != "" { + result.EndpointType = c.EndpointType + } + + // Apply defaults for duration fields (zero means not set) + result.RelaxedTimeout = defaults.RelaxedTimeout + if c.RelaxedTimeout > 0 { + result.RelaxedTimeout = c.RelaxedTimeout + } + + result.HandoffTimeout = defaults.HandoffTimeout + if c.HandoffTimeout > 0 { + result.HandoffTimeout = c.HandoffTimeout + } + + // Copy worker configuration + result.MaxWorkers = c.MaxWorkers + + // Apply worker defaults based on pool size + result.applyWorkerDefaults(poolSize) + + // Apply queue size defaults with new scaling approach + // Default: max(20x workers, PoolSize), capped by maxActiveConns or 5x pool size + workerBasedSize := result.MaxWorkers * 20 + poolBasedSize := poolSize + result.HandoffQueueSize = util.Max(workerBasedSize, poolBasedSize) + if c.HandoffQueueSize > 0 { + // When explicitly set: enforce minimum of 200 + result.HandoffQueueSize = util.Max(200, c.HandoffQueueSize) + } + + // Cap queue size: use maxActiveConns+1 if set, otherwise 5x pool size + var queueCap int + if maxActiveConns > 0 { + queueCap = maxActiveConns + 1 + // Ensure queue cap is at least 2 for very small maxActiveConns + if queueCap < 2 { + queueCap = 2 + } + } else { + queueCap = poolSize * 5 + } + result.HandoffQueueSize = util.Min(result.HandoffQueueSize, queueCap) + + // Ensure minimum queue size of 2 (fallback for very small pools) + if result.HandoffQueueSize < 2 { + result.HandoffQueueSize = 2 + } + + result.PostHandoffRelaxedDuration = result.RelaxedTimeout * 2 + if c.PostHandoffRelaxedDuration > 0 { + result.PostHandoffRelaxedDuration = c.PostHandoffRelaxedDuration + } + + // Apply defaults for configuration fields + result.MaxHandoffRetries = defaults.MaxHandoffRetries + if c.MaxHandoffRetries > 0 { + result.MaxHandoffRetries = c.MaxHandoffRetries + } + + // Circuit breaker configuration + result.CircuitBreakerFailureThreshold = defaults.CircuitBreakerFailureThreshold + if c.CircuitBreakerFailureThreshold > 0 { + result.CircuitBreakerFailureThreshold = c.CircuitBreakerFailureThreshold + } + + result.CircuitBreakerResetTimeout = defaults.CircuitBreakerResetTimeout + if c.CircuitBreakerResetTimeout > 0 { + result.CircuitBreakerResetTimeout = c.CircuitBreakerResetTimeout + } + + result.CircuitBreakerMaxRequests = defaults.CircuitBreakerMaxRequests + if c.CircuitBreakerMaxRequests > 0 { + result.CircuitBreakerMaxRequests = c.CircuitBreakerMaxRequests + } + + if internal.LogLevel.DebugOrAbove() { + internal.Logger.Printf(context.Background(), logs.DebugLoggingEnabled()) + internal.Logger.Printf(context.Background(), logs.ConfigDebug(result)) + } + return result +} + +// Clone creates a deep copy of the configuration. 
+func (c *Config) Clone() *Config {
+	if c == nil {
+		return DefaultConfig()
+	}
+
+	return &Config{
+		Mode:                       c.Mode,
+		EndpointType:               c.EndpointType,
+		RelaxedTimeout:             c.RelaxedTimeout,
+		HandoffTimeout:             c.HandoffTimeout,
+		MaxWorkers:                 c.MaxWorkers,
+		HandoffQueueSize:           c.HandoffQueueSize,
+		PostHandoffRelaxedDuration: c.PostHandoffRelaxedDuration,
+
+		// Circuit breaker configuration
+		CircuitBreakerFailureThreshold: c.CircuitBreakerFailureThreshold,
+		CircuitBreakerResetTimeout:     c.CircuitBreakerResetTimeout,
+		CircuitBreakerMaxRequests:      c.CircuitBreakerMaxRequests,
+
+		// Configuration fields
+		MaxHandoffRetries: c.MaxHandoffRetries,
+	}
+}
+
+// applyWorkerDefaults calculates and applies worker defaults based on pool size
+func (c *Config) applyWorkerDefaults(poolSize int) {
+	// Calculate defaults based on pool size
+	if poolSize <= 0 {
+		poolSize = 10 * runtime.GOMAXPROCS(0)
+	}
+
+	// When not set: min(poolSize/2, max(10, poolSize/3)) - balanced scaling approach
+	originalMaxWorkers := c.MaxWorkers
+	c.MaxWorkers = util.Min(poolSize/2, util.Max(10, poolSize/3))
+	if originalMaxWorkers != 0 {
+		// When explicitly set: max(poolSize/2, set_value) - ensure at least poolSize/2 workers
+		c.MaxWorkers = util.Max(poolSize/2, originalMaxWorkers)
+	}
+
+	// Ensure minimum of 1 worker (fallback for very small pools)
+	if c.MaxWorkers < 1 {
+		c.MaxWorkers = 1
+	}
+}
+
+// DetectEndpointType automatically detects the appropriate endpoint type
+// based on the connection address and TLS configuration.
+//
+// For IP addresses:
+//   - If TLS is enabled: requests FQDN for proper certificate validation
+//   - If TLS is disabled: requests IP for better performance
+//
+// For hostnames:
+//   - Always requests FQDN, regardless of TLS; internal vs. external is
+//     decided by naming heuristics
+//
+// Internal vs External detection:
+//   - For IPs: uses private IP range detection
+//   - For hostnames: uses heuristics based on common internal naming patterns
+func DetectEndpointType(addr string, tlsEnabled bool) EndpointType {
+	// Extract host from "host:port" format
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		host = addr // Assume no port
+	}
+
+	// Check if the host is an IP address or hostname
+	ip := net.ParseIP(host)
+	isIPAddress := ip != nil
+	var endpointType EndpointType
+
+	if isIPAddress {
+		// Address is an IP - determine if it's private or public
+		isPrivate := ip.IsPrivate() || ip.IsLoopback() || ip.IsLinkLocalUnicast()
+
+		if tlsEnabled {
+			// TLS with IP addresses - still prefer FQDN for certificate validation
+			if isPrivate {
+				endpointType = EndpointTypeInternalFQDN
+			} else {
+				endpointType = EndpointTypeExternalFQDN
+			}
+		} else {
+			// No TLS - can use IP addresses directly
+			if isPrivate {
+				endpointType = EndpointTypeInternalIP
+			} else {
+				endpointType = EndpointTypeExternalIP
+			}
+		}
+	} else {
+		// Address is a hostname
+		isInternal := isInternalHostname(host)
+		if isInternal {
+			endpointType = EndpointTypeInternalFQDN
+		} else {
+			endpointType = EndpointTypeExternalFQDN
+		}
+	}
+
+	return endpointType
+}
+
+// isInternalHostname determines if a hostname appears to be internal/private.
+// This is a heuristic based on common naming patterns.
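+// For example: "localhost", "redis-1", and "db.internal" are treated as
+// internal, while "cache.example.com" is treated as external.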
+func isInternalHostname(hostname string) bool { + // Convert to lowercase for comparison + hostname = strings.ToLower(hostname) + + // Common internal hostname patterns + internalPatterns := []string{ + "localhost", + ".local", + ".internal", + ".corp", + ".lan", + ".intranet", + ".private", + } + + // Check for exact match or suffix match + for _, pattern := range internalPatterns { + if hostname == pattern || strings.HasSuffix(hostname, pattern) { + return true + } + } + + // Check for RFC 1918 style hostnames (e.g., redis-1, db-server, etc.) + // If hostname doesn't contain dots, it's likely internal + if !strings.Contains(hostname, ".") { + return true + } + + // Default to external for fully qualified domain names + return false +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go new file mode 100644 index 000000000..049656bdd --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go @@ -0,0 +1,76 @@ +package maintnotifications + +import ( + "errors" + + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" +) + +// Configuration errors +var ( + ErrInvalidRelaxedTimeout = errors.New(logs.InvalidRelaxedTimeoutError()) + ErrInvalidHandoffTimeout = errors.New(logs.InvalidHandoffTimeoutError()) + ErrInvalidHandoffWorkers = errors.New(logs.InvalidHandoffWorkersError()) + ErrInvalidHandoffQueueSize = errors.New(logs.InvalidHandoffQueueSizeError()) + ErrInvalidPostHandoffRelaxedDuration = errors.New(logs.InvalidPostHandoffRelaxedDurationError()) + ErrInvalidEndpointType = errors.New(logs.InvalidEndpointTypeError()) + ErrInvalidMaintNotifications = errors.New(logs.InvalidMaintNotificationsError()) + ErrMaxHandoffRetriesReached = errors.New(logs.MaxHandoffRetriesReachedError()) + + // Configuration validation errors + + // ErrInvalidHandoffRetries is returned when the number of handoff retries is invalid + ErrInvalidHandoffRetries = errors.New(logs.InvalidHandoffRetriesError()) +) + +// Integration errors +var ( + // ErrInvalidClient is returned when the client does not support push notifications + ErrInvalidClient = errors.New(logs.InvalidClientError()) +) + +// Handoff errors +var ( + // ErrHandoffQueueFull is returned when the handoff queue is full + ErrHandoffQueueFull = errors.New(logs.HandoffQueueFullError()) +) + +// Notification errors +var ( + // ErrInvalidNotification is returned when a notification is in an invalid format + ErrInvalidNotification = errors.New(logs.InvalidNotificationError()) +) + +// connection handoff errors +var ( + // ErrConnectionMarkedForHandoff is returned when a connection is marked for handoff + // and should not be used until the handoff is complete + ErrConnectionMarkedForHandoff = errors.New(logs.ConnectionMarkedForHandoffErrorMessage) + // ErrConnectionMarkedForHandoffWithState is returned when a connection is marked for handoff + // and should not be used until the handoff is complete + ErrConnectionMarkedForHandoffWithState = errors.New(logs.ConnectionMarkedForHandoffErrorMessage + " with state") + // ErrConnectionInvalidHandoffState is returned when a connection is in an invalid state for handoff + ErrConnectionInvalidHandoffState = errors.New(logs.ConnectionInvalidHandoffStateErrorMessage) +) + +// shutdown errors +var ( + // ErrShutdown is returned when the maintnotifications manager is shutdown + ErrShutdown = errors.New(logs.ShutdownError()) +) + +// circuit breaker errors +var ( + // ErrCircuitBreakerOpen is 
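
For reference, a minimal sketch of exercising the detection logic above from a standalone program; the addresses are made-up examples and the printed values follow the branches shown (EndpointType's print format depends on how the package declares its constants):

	package main

	import (
		"fmt"

		"github.com/redis/go-redis/v9/maintnotifications"
	)

	func main() {
		// Private IP, no TLS: the IP itself can be requested.
		fmt.Println(maintnotifications.DetectEndpointType("10.0.0.5:6379", false)) // internal-IP type
		// Private IP with TLS: an FQDN is requested for certificate validation.
		fmt.Println(maintnotifications.DetectEndpointType("10.0.0.5:6379", true)) // internal-FQDN type
		// Unqualified hostname (no dots): treated as internal.
		fmt.Println(maintnotifications.DetectEndpointType("redis-1:6379", false)) // internal-FQDN type
		// Fully qualified public name: treated as external.
		fmt.Println(maintnotifications.DetectEndpointType("redis.example.com:6379", false)) // external-FQDN type
	}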
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go
new file mode 100644
index 000000000..049656bdd
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go
@@ -0,0 +1,76 @@
+package maintnotifications
+
+import (
+	"errors"
+
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+)
+
+// Configuration errors
+var (
+	ErrInvalidRelaxedTimeout             = errors.New(logs.InvalidRelaxedTimeoutError())
+	ErrInvalidHandoffTimeout             = errors.New(logs.InvalidHandoffTimeoutError())
+	ErrInvalidHandoffWorkers             = errors.New(logs.InvalidHandoffWorkersError())
+	ErrInvalidHandoffQueueSize           = errors.New(logs.InvalidHandoffQueueSizeError())
+	ErrInvalidPostHandoffRelaxedDuration = errors.New(logs.InvalidPostHandoffRelaxedDurationError())
+	ErrInvalidEndpointType               = errors.New(logs.InvalidEndpointTypeError())
+	ErrInvalidMaintNotifications         = errors.New(logs.InvalidMaintNotificationsError())
+	ErrMaxHandoffRetriesReached          = errors.New(logs.MaxHandoffRetriesReachedError())
+
+	// Configuration validation errors
+
+	// ErrInvalidHandoffRetries is returned when the number of handoff retries is invalid
+	ErrInvalidHandoffRetries = errors.New(logs.InvalidHandoffRetriesError())
+)
+
+// Integration errors
+var (
+	// ErrInvalidClient is returned when the client does not support push notifications
+	ErrInvalidClient = errors.New(logs.InvalidClientError())
+)
+
+// Handoff errors
+var (
+	// ErrHandoffQueueFull is returned when the handoff queue is full
+	ErrHandoffQueueFull = errors.New(logs.HandoffQueueFullError())
+)
+
+// Notification errors
+var (
+	// ErrInvalidNotification is returned when a notification is in an invalid format
+	ErrInvalidNotification = errors.New(logs.InvalidNotificationError())
+)
+
+// connection handoff errors
+var (
+	// ErrConnectionMarkedForHandoff is returned when a connection is marked for handoff
+	// and should not be used until the handoff is complete
+	ErrConnectionMarkedForHandoff = errors.New(logs.ConnectionMarkedForHandoffErrorMessage)
+	// ErrConnectionMarkedForHandoffWithState is the same condition, with the connection
+	// state appended to the message
+	ErrConnectionMarkedForHandoffWithState = errors.New(logs.ConnectionMarkedForHandoffErrorMessage + " with state")
+	// ErrConnectionInvalidHandoffState is returned when a connection is in an invalid state for handoff
+	ErrConnectionInvalidHandoffState = errors.New(logs.ConnectionInvalidHandoffStateErrorMessage)
+)
+
+// shutdown errors
+var (
+	// ErrShutdown is returned when the maintnotifications manager is shutdown
+	ErrShutdown = errors.New(logs.ShutdownError())
+)
+
+// circuit breaker errors
+var (
+	// ErrCircuitBreakerOpen is returned when the circuit breaker is open
+	ErrCircuitBreakerOpen = errors.New(logs.CircuitBreakerOpenErrorMessage)
+)
+
+// circuit breaker configuration errors
+var (
+	// ErrInvalidCircuitBreakerFailureThreshold is returned when the circuit breaker failure threshold is invalid
+	ErrInvalidCircuitBreakerFailureThreshold = errors.New(logs.InvalidCircuitBreakerFailureThresholdError())
+	// ErrInvalidCircuitBreakerResetTimeout is returned when the circuit breaker reset timeout is invalid
+	ErrInvalidCircuitBreakerResetTimeout = errors.New(logs.InvalidCircuitBreakerResetTimeoutError())
+	// ErrInvalidCircuitBreakerMaxRequests is returned when the circuit breaker max requests is invalid
+	ErrInvalidCircuitBreakerMaxRequests = errors.New(logs.InvalidCircuitBreakerMaxRequestsError())
+)
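
Callers are expected to classify these sentinels with errors.Is. A small sketch under that assumption; shouldRetryHandoff is a hypothetical helper, not part of the package:

	package example

	import (
		"errors"

		"github.com/redis/go-redis/v9/maintnotifications"
	)

	// shouldRetryHandoff classifies handoff errors: queue pressure is
	// transient, while an open circuit breaker or an exhausted retry
	// budget should not be retried immediately.
	func shouldRetryHandoff(err error) bool {
		switch {
		case errors.Is(err, maintnotifications.ErrHandoffQueueFull):
			return true
		case errors.Is(err, maintnotifications.ErrCircuitBreakerOpen),
			errors.Is(err, maintnotifications.ErrMaxHandoffRetriesReached):
			return false
		default:
			return false
		}
	}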
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go
new file mode 100644
index 000000000..3a3465571
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go
@@ -0,0 +1,101 @@
+package maintnotifications
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/push"
+)
+
+// contextKey is a custom type for context keys to avoid collisions
+type contextKey string
+
+const (
+	startTimeKey contextKey = "maint_notif_start_time"
+)
+
+// MetricsHook collects metrics about notification processing.
+// It is an example implementation and is not synchronized for concurrent use.
+type MetricsHook struct {
+	NotificationCounts map[string]int64
+	ProcessingTimes    map[string]time.Duration
+	ErrorCounts        map[string]int64
+	HandoffCounts      int64 // Total handoffs initiated
+	HandoffSuccesses   int64 // Successful handoffs
+	HandoffFailures    int64 // Failed handoffs
+}
+
+// NewMetricsHook creates a new metrics collection hook.
+func NewMetricsHook() *MetricsHook {
+	return &MetricsHook{
+		NotificationCounts: make(map[string]int64),
+		ProcessingTimes:    make(map[string]time.Duration),
+		ErrorCounts:        make(map[string]int64),
+	}
+}
+
+// PreHook records the start time for processing metrics.
+func (mh *MetricsHook) PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) {
+	mh.NotificationCounts[notificationType]++
+
+	// Log connection information if available
+	if conn, ok := notificationCtx.Conn.(*pool.Conn); ok {
+		internal.Logger.Printf(ctx, logs.MetricsHookProcessingNotification(notificationType, conn.GetID()))
+	}
+
+	// Derive a context carrying the start time. Note that the hook cannot
+	// return the derived context to the caller, so PostHook only observes
+	// the start time if the caller itself propagates such a context.
+	startTime := time.Now()
+	_ = context.WithValue(ctx, startTimeKey, startTime)
+
+	return notification, true
+}
+
+// PostHook records processing completion and any errors.
+func (mh *MetricsHook) PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) {
+	// Calculate processing duration
+	if startTime, ok := ctx.Value(startTimeKey).(time.Time); ok {
+		duration := time.Since(startTime)
+		mh.ProcessingTimes[notificationType] = duration
+	}
+
+	// Record errors
+	if result != nil {
+		mh.ErrorCounts[notificationType]++
+
+		// Log error details with connection information
+		if conn, ok := notificationCtx.Conn.(*pool.Conn); ok {
+			internal.Logger.Printf(ctx, logs.MetricsHookRecordedError(notificationType, conn.GetID(), result))
+		}
+	}
+}
+
+// GetMetrics returns a summary of collected metrics.
+func (mh *MetricsHook) GetMetrics() map[string]interface{} {
+	return map[string]interface{}{
+		"notification_counts": mh.NotificationCounts,
+		"processing_times":    mh.ProcessingTimes,
+		"error_counts":        mh.ErrorCounts,
+	}
+}
+
+// ExampleCircuitBreakerMonitor demonstrates how to monitor circuit breaker status
+func ExampleCircuitBreakerMonitor(poolHook *PoolHook) {
+	// Get circuit breaker statistics
+	stats := poolHook.GetCircuitBreakerStats()
+
+	for _, stat := range stats {
+		fmt.Printf("Circuit Breaker for %s:\n", stat.Endpoint)
+		fmt.Printf("  State: %s\n", stat.State)
+		fmt.Printf("  Failures: %d\n", stat.Failures)
+		fmt.Printf("  Last Failure: %v\n", stat.LastFailureTime)
+		fmt.Printf("  Last Success: %v\n", stat.LastSuccessTime)
+
+		// Alert if circuit breaker is open
+		if stat.State.String() == "open" {
+			fmt.Printf("  ⚠️ ALERT: Circuit breaker is OPEN for %s\n", stat.Endpoint)
+		}
+	}
+}
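
A sketch of wiring the hook through Manager.AddNotificationHook (defined in manager.go later in this diff); wireMetrics and dumpMetrics are hypothetical helper names:

	package example

	import (
		"fmt"

		"github.com/redis/go-redis/v9/maintnotifications"
	)

	// wireMetrics registers a MetricsHook on an existing manager.
	func wireMetrics(mgr *maintnotifications.Manager) *maintnotifications.MetricsHook {
		metrics := maintnotifications.NewMetricsHook()
		mgr.AddNotificationHook(metrics)
		return metrics
	}

	// dumpMetrics prints the per-type counters collected so far.
	func dumpMetrics(metrics *maintnotifications.MetricsHook) {
		for notificationType, count := range metrics.NotificationCounts {
			fmt.Printf("%s: %d notifications\n", notificationType, count)
		}
	}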
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go
new file mode 100644
index 000000000..5b60e39b5
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go
@@ -0,0 +1,512 @@
+package maintnotifications
+
+import (
+	"context"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+)
+
+// handoffWorkerManager manages background workers and queue for connection handoffs
+type handoffWorkerManager struct {
+	// Event-driven handoff support
+	handoffQueue chan HandoffRequest // Queue for handoff requests
+	shutdown     chan struct{}       // Shutdown signal
+	shutdownOnce sync.Once           // Ensure clean shutdown
+	workerWg     sync.WaitGroup      // Track worker goroutines
+
+	// On-demand worker management
+	maxWorkers     int
+	activeWorkers  atomic.Int32
+	workerTimeout  time.Duration // How long workers wait for work before exiting
+	workersScaling atomic.Bool
+
+	// Simple state tracking
+	pending sync.Map // map[uint64]int64 (connID -> seqID)
+
+	// Configuration for the maintenance notifications
+	config *Config
+
+	// Pool hook reference for handoff processing
+	poolHook *PoolHook
+
+	// Circuit breaker manager for endpoint failure handling
+	circuitBreakerManager *CircuitBreakerManager
+}
+
+// newHandoffWorkerManager creates a new handoff worker manager
+func newHandoffWorkerManager(config *Config, poolHook *PoolHook) *handoffWorkerManager {
+	return &handoffWorkerManager{
+		handoffQueue:          make(chan HandoffRequest, config.HandoffQueueSize),
+		shutdown:              make(chan struct{}),
+		maxWorkers:            config.MaxWorkers,
+		activeWorkers:         atomic.Int32{},   // Start with no workers - create on demand
+		workerTimeout:         15 * time.Second, // Workers exit after 15s of inactivity
+		config:                config,
+		poolHook:              poolHook,
+		circuitBreakerManager: newCircuitBreakerManager(config),
+	}
+}
+
+// getCurrentWorkers returns the current number of active workers (for testing)
+func (hwm *handoffWorkerManager) getCurrentWorkers() int {
+	return int(hwm.activeWorkers.Load())
+}
+
+// getPendingMap returns the pending map for testing purposes
+func (hwm *handoffWorkerManager) getPendingMap() *sync.Map {
+	return &hwm.pending
+}
+
+// getMaxWorkers returns the max workers for testing purposes
+func (hwm *handoffWorkerManager) getMaxWorkers() int {
+	return hwm.maxWorkers
+}
+
+// getHandoffQueue returns the handoff queue for testing purposes
+func (hwm *handoffWorkerManager) getHandoffQueue() chan HandoffRequest {
+	return hwm.handoffQueue
+}
+
+// getCircuitBreakerStats returns circuit breaker statistics for monitoring
+func (hwm *handoffWorkerManager) getCircuitBreakerStats() []CircuitBreakerStats {
+	return hwm.circuitBreakerManager.GetAllStats()
+}
+
+// resetCircuitBreakers resets all circuit breakers (useful for testing)
+func (hwm *handoffWorkerManager) resetCircuitBreakers() {
+	hwm.circuitBreakerManager.Reset()
+}
+
+// isHandoffPending returns true if the given connection has a pending handoff
+func (hwm *handoffWorkerManager) isHandoffPending(conn *pool.Conn) bool {
+	_, pending := hwm.pending.Load(conn.GetID())
+	return pending
+}
+
+// ensureWorkerAvailable ensures at least one worker is available to process requests
+// Creates a new worker if needed and under the max limit
+func (hwm *handoffWorkerManager) ensureWorkerAvailable() {
+	select {
+	case <-hwm.shutdown:
+		return
+	default:
+		if hwm.workersScaling.CompareAndSwap(false, true) {
+			defer hwm.workersScaling.Store(false)
+			// Check if we need a new worker
+			currentWorkers := hwm.activeWorkers.Load()
+			workersWas := currentWorkers
+			for currentWorkers < int32(hwm.maxWorkers) {
+				hwm.workerWg.Add(1)
+				go hwm.onDemandWorker()
+				currentWorkers++
+			}
+			// workersWas is always <= currentWorkers. Add only the number of
+			// workers created here (currentWorkers - workersWas) so that a
+			// worker exiting concurrently is not double-counted.
+			hwm.activeWorkers.Add(currentWorkers - workersWas)
+		}
+	}
+}
+
+// onDemandWorker processes handoff requests and exits when idle
+func (hwm *handoffWorkerManager) onDemandWorker() {
+	defer func() {
+		// Handle panics to ensure proper cleanup
+		if r := recover(); r != nil {
+			internal.Logger.Printf(context.Background(), logs.WorkerPanicRecovered(r))
+		}
+
+		// Decrement active worker count when exiting
+		hwm.activeWorkers.Add(-1)
+		hwm.workerWg.Done()
+	}()
+
+	// Create reusable timer to prevent timer leaks
+	timer := time.NewTimer(hwm.workerTimeout)
+	defer timer.Stop()
+
+	for {
+		// Reset timer for next iteration
+		if !timer.Stop() {
+			select {
+			case <-timer.C:
+			default:
+			}
+		}
+		timer.Reset(hwm.workerTimeout)
+
+		select {
+		case <-hwm.shutdown:
+			if internal.LogLevel.InfoOrAbove() {
+				internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToShutdown())
+			}
+			return
+		case <-timer.C:
+			// Worker has been idle for too long, exit to save resources
+			if internal.LogLevel.InfoOrAbove() {
+				internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToInactivityTimeout(hwm.workerTimeout))
+			}
+			return
+		case request := <-hwm.handoffQueue:
+			// Check for shutdown before processing
+			select {
+			case <-hwm.shutdown:
+				if internal.LogLevel.InfoOrAbove() {
+					internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToShutdownWhileProcessing())
+				}
+				// Clean up the request before exiting
+				hwm.pending.Delete(request.ConnID)
+				return
+			default:
+				// Process the request
+				hwm.processHandoffRequest(request)
+			}
+		}
+	}
+}
+
+// processHandoffRequest processes a single handoff request
+func (hwm *handoffWorkerManager) processHandoffRequest(request HandoffRequest) {
+	if internal.LogLevel.InfoOrAbove() {
+		internal.Logger.Printf(context.Background(), logs.HandoffStarted(request.Conn.GetID(), request.Endpoint))
+	}
+
+	// Create a context with handoff timeout from config
+	handoffTimeout := 15 * time.Second // Default timeout
+	if hwm.config != nil && hwm.config.HandoffTimeout > 0 {
+		handoffTimeout = hwm.config.HandoffTimeout
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), handoffTimeout)
+	defer cancel()
+
+	// Create a context that also respects the shutdown signal
+	shutdownCtx, shutdownCancel := context.WithCancel(ctx)
+	defer shutdownCancel()
+
+	// Monitor shutdown signal in a separate goroutine
+	go func() {
+		select {
+		case <-hwm.shutdown:
+			shutdownCancel()
+		case <-shutdownCtx.Done():
+		}
+	}()
+
+	// Perform the handoff with cancellable context
+	shouldRetry, err := hwm.performConnectionHandoff(shutdownCtx, request.Conn)
+	minRetryBackoff := 500 * time.Millisecond
+	if err != nil {
+		if shouldRetry {
+			now := time.Now()
+			deadline, ok := shutdownCtx.Deadline()
+			thirdOfTimeout := handoffTimeout / 3
+			if !ok || deadline.Before(now) {
+				// wait a third of the timeout before retrying if there is no deadline or it has passed
+				deadline = now.Add(thirdOfTimeout)
+			}
+			afterTime := deadline.Sub(now)
+			if afterTime < minRetryBackoff {
+				afterTime = minRetryBackoff
+			}
+
+			if internal.LogLevel.InfoOrAbove() {
+				// Get current retry count for better logging
+				currentRetries := request.Conn.HandoffRetries()
+				maxRetries := 3 // Default fallback
+				if hwm.config != nil {
+					maxRetries = hwm.config.MaxHandoffRetries
+				}
+				internal.Logger.Printf(context.Background(), logs.HandoffFailed(request.ConnID, request.Endpoint, currentRetries, maxRetries, err))
+			}
+			// Schedule retry - keep connection in pending map until retry is queued
+			time.AfterFunc(afterTime, func() {
+				if err := hwm.queueHandoff(request.Conn); err != nil {
+					if internal.LogLevel.WarnOrAbove() {
+						internal.Logger.Printf(context.Background(), logs.CannotQueueHandoffForRetry(err))
+					}
+					// Failed to queue retry - remove from pending and close connection
+					hwm.pending.Delete(request.Conn.GetID())
+					hwm.closeConnFromRequest(context.Background(), request, err)
+				} else {
+					// Successfully queued retry - remove from pending (will be re-added by queueHandoff)
+					hwm.pending.Delete(request.Conn.GetID())
+				}
+			})
+			return
+		} else {
+			// Won't retry - remove from pending and close connection
+			hwm.pending.Delete(request.Conn.GetID())
+			go hwm.closeConnFromRequest(ctx, request, err)
+		}
+
+		// Clear handoff state if not returned for retry
+		seqID := request.Conn.GetMovingSeqID()
+		connID := request.Conn.GetID()
+		if hwm.poolHook.operationsManager != nil {
+			hwm.poolHook.operationsManager.UntrackOperationWithConnID(seqID, connID)
+		}
+	} else {
+		// Success - remove from pending map
+		hwm.pending.Delete(request.Conn.GetID())
+	}
+}
+
+// queueHandoff queues a handoff request for processing
+// if err is returned, connection will be removed from pool
+func (hwm *handoffWorkerManager) queueHandoff(conn *pool.Conn) error {
+	// Get handoff info atomically to prevent race conditions
+	shouldHandoff, endpoint, seqID := conn.GetHandoffInfo()
+
+	// On retries the connection will not be marked for handoff, but it will have retries > 0.
+	// If shouldHandoff is false and retries is 0, this is neither a retry nor a new handoff,
+	// so there is nothing to queue.
+	if !shouldHandoff && conn.HandoffRetries() == 0 {
+		if internal.LogLevel.InfoOrAbove() {
+			internal.Logger.Printf(context.Background(), logs.ConnectionNotMarkedForHandoff(conn.GetID()))
+		}
+		return errors.New(logs.ConnectionNotMarkedForHandoffError(conn.GetID()))
+	}
+
+	// Create handoff request with atomically retrieved data
+	request := HandoffRequest{
+		Conn:     conn,
+		ConnID:   conn.GetID(),
+		Endpoint: endpoint,
+		SeqID:    seqID,
+		Pool:     hwm.poolHook.pool, // Include pool for connection removal on failure
+	}
+
+	select {
+	// priority to shutdown
+	case <-hwm.shutdown:
+		return ErrShutdown
+	default:
+		select {
+		case <-hwm.shutdown:
+			return ErrShutdown
+		case hwm.handoffQueue <- request:
+			// Store in pending map
+			hwm.pending.Store(request.ConnID, request.SeqID)
+			// Ensure we have a worker to process this request
+			hwm.ensureWorkerAvailable()
+			return nil
+		default:
+			select {
+			case <-hwm.shutdown:
+				return ErrShutdown
+			case hwm.handoffQueue <- request:
+				// Store in pending map
+				hwm.pending.Store(request.ConnID, request.SeqID)
+				// Ensure we have a worker to process this request
+				hwm.ensureWorkerAvailable()
+				return nil
+			case <-time.After(100 * time.Millisecond): // give workers a chance to process
+				// Queue is full - log and attempt scaling
+				queueLen := len(hwm.handoffQueue)
+				queueCap := cap(hwm.handoffQueue)
+				if internal.LogLevel.WarnOrAbove() {
+					internal.Logger.Printf(context.Background(), logs.HandoffQueueFull(queueLen, queueCap))
+				}
+			}
+		}
+	}
+
+	// Ensure we have workers available to handle the load
+	hwm.ensureWorkerAvailable()
+	return ErrHandoffQueueFull
+}
+
+// shutdownWorkers gracefully shuts down the worker manager, waiting for workers to complete
+func (hwm *handoffWorkerManager) shutdownWorkers(ctx context.Context) error {
+	hwm.shutdownOnce.Do(func() {
+		close(hwm.shutdown)
+		// workers will exit when they finish their current request
+
+		// Shutdown circuit breaker manager cleanup goroutine
+		if hwm.circuitBreakerManager != nil {
+			hwm.circuitBreakerManager.Shutdown()
+		}
+	})
+
+	// Wait for workers to complete
+	done := make(chan struct{})
+	go func() {
+		hwm.workerWg.Wait()
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// performConnectionHandoff performs the actual connection handoff
+// When error is returned, the connection handoff should be retried if err is not ErrMaxHandoffRetriesReached
+func (hwm *handoffWorkerManager) performConnectionHandoff(ctx context.Context, conn *pool.Conn) (shouldRetry bool, err error) {
+	// Resolve the connection ID and target endpoint for this handoff
+	connID := conn.GetID()
+
+	newEndpoint := conn.GetHandoffEndpoint()
+	if newEndpoint == "" {
+		return false, ErrConnectionInvalidHandoffState
+	}
+
+	// Use circuit breaker to protect against failing endpoints
+	circuitBreaker := hwm.circuitBreakerManager.GetCircuitBreaker(newEndpoint)
+
+	// Check if circuit breaker is open before attempting handoff
+	if circuitBreaker.IsOpen() {
+		internal.Logger.Printf(ctx, logs.CircuitBreakerOpen(connID, newEndpoint))
+		return false, ErrCircuitBreakerOpen // Don't retry when circuit breaker is open
+	}
+
+	// Perform the handoff
+	shouldRetry, err = hwm.performHandoffInternal(ctx, conn, newEndpoint, connID)
+
+	// Update circuit breaker based on result
+	if err != nil {
+		// Only track dial/network errors in circuit breaker, not initialization errors
+		if shouldRetry {
+			circuitBreaker.recordFailure()
+		}
+		return shouldRetry, err
+	}
+
+	// Success - record in circuit breaker
+	circuitBreaker.recordSuccess()
+	return false, nil
+}
+
+// performHandoffInternal performs the actual handoff logic (extracted for circuit breaker integration)
+func (hwm *handoffWorkerManager) performHandoffInternal(
+	ctx context.Context,
+	conn *pool.Conn,
+	newEndpoint string,
+	connID uint64,
+) (shouldRetry bool, err error) {
+	retries := conn.IncrementAndGetHandoffRetries(1)
+	internal.Logger.Printf(ctx, logs.HandoffRetryAttempt(connID, retries, newEndpoint, conn.RemoteAddr().String()))
+	maxRetries := 3 // Default fallback
+	if hwm.config != nil {
+		maxRetries = hwm.config.MaxHandoffRetries
+	}
+
+	if retries > maxRetries {
+		if internal.LogLevel.WarnOrAbove() {
+			internal.Logger.Printf(ctx, logs.ReachedMaxHandoffRetries(connID, newEndpoint, maxRetries))
+		}
+		// won't retry on ErrMaxHandoffRetriesReached
+		return false, ErrMaxHandoffRetriesReached
+	}
+
+	// Create endpoint-specific dialer
+	endpointDialer := hwm.createEndpointDialer(newEndpoint)
+
+	// Create new connection to the new endpoint
+	newNetConn, err := endpointDialer(ctx)
+	if err != nil {
+		internal.Logger.Printf(ctx, logs.FailedToDialNewEndpoint(connID, newEndpoint, err))
+		// Possibly a transient network error - will retry after a delay
+		return true, err
+	}
+
+	// Get the old connection
+	oldConn := conn.GetNetConn()
+
+	// Apply relaxed timeout to the new connection for the configured post-handoff duration
+	// This gives the new connection more time to handle operations during cluster transition
+	// Setting this here (before initializing the connection) ensures that the connection is going
+	// to use the relaxed timeout for the first operation (auth/ACL select)
+	if hwm.config != nil && hwm.config.PostHandoffRelaxedDuration > 0 {
+		relaxedTimeout := hwm.config.RelaxedTimeout
+		// Set relaxed timeout with deadline - no background goroutine needed
+		deadline := time.Now().Add(hwm.config.PostHandoffRelaxedDuration)
+		conn.SetRelaxedTimeoutWithDeadline(relaxedTimeout, relaxedTimeout, deadline)
+
+		if internal.LogLevel.InfoOrAbove() {
+			internal.Logger.Printf(context.Background(), logs.ApplyingRelaxedTimeoutDueToPostHandoff(connID, relaxedTimeout, deadline.Format("15:04:05.000")))
+		}
+	}
+
+	// Replace the connection and execute initialization
+	err = conn.SetNetConnAndInitConn(ctx, newNetConn)
+	if err != nil {
+		// won't retry
+		// Initialization failed - remove the connection
+		return false, err
+	}
+	defer func() {
+		if oldConn != nil {
+			oldConn.Close()
+		}
+	}()
+
+	// Clearing the handoff state will:
+	// - set the connection as usable again
+	// - clear the handoff state (shouldHandoff, endpoint, seqID)
+	// - reset the handoff retries to 0
+	// Note: Theoretically there may be a short window where the connection is in the pool
+	// and IDLE (initConn completed) but still has handoff state set.
+	conn.ClearHandoffState()
+	internal.Logger.Printf(ctx, logs.HandoffSucceeded(connID, newEndpoint))
+
+	// successfully completed the handoff, no retry needed and no error
+	return false, nil
+}
+
+// createEndpointDialer creates a dialer function that connects to a specific endpoint
+func (hwm *handoffWorkerManager) createEndpointDialer(endpoint string) func(context.Context) (net.Conn, error) {
+	return func(ctx context.Context) (net.Conn, error) {
+		// Parse endpoint to extract host and port
+		host, port, err := net.SplitHostPort(endpoint)
+		if err != nil {
+			// If no port is specified, assume the default Redis port
+			host = endpoint
+			if port == "" {
+				port = "6379"
+			}
+		}
+
+		// Use the base dialer to connect to the new endpoint
+		return hwm.poolHook.baseDialer(ctx, hwm.poolHook.network, net.JoinHostPort(host, port))
+	}
+}
+
+// closeConnFromRequest closes the connection and logs the reason
+func (hwm *handoffWorkerManager) closeConnFromRequest(ctx context.Context, request HandoffRequest, err error) {
+	pooler := request.Pool
+	conn := request.Conn
+
+	// Clear handoff state before closing
+	conn.ClearHandoffState()
+
+	if pooler != nil {
+		// Use RemoveWithoutTurn instead of Remove to avoid freeing a turn that we don't have.
+		// The handoff worker doesn't call Get(), so it doesn't have a turn to free.
+		// Remove() is meant to be called after Get() and frees a turn.
+		// RemoveWithoutTurn() removes and closes the connection without affecting the queue.
+		pooler.RemoveWithoutTurn(ctx, conn, err)
+		if internal.LogLevel.WarnOrAbove() {
+			internal.Logger.Printf(ctx, logs.RemovingConnectionFromPool(conn.GetID(), err))
+		}
+	} else {
+		err := conn.Close() // Close the connection if no pool provided
+		if err != nil {
+			internal.Logger.Printf(ctx, "redis: failed to close connection: %v", err)
+		}
+		if internal.LogLevel.WarnOrAbove() {
+			internal.Logger.Printf(ctx, logs.NoPoolProvidedCannotRemove(conn.GetID(), err))
+		}
+	}
+}
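
The worker and queue sizing rules used above (applyWorkerDefaults plus the queue logic in ApplyDefaultsWithPoolConfig) worked through for a concrete pool of 100 connections with no explicit overrides; this sketch assumes Go 1.21+ for the built-in min/max (the library itself uses its internal util.Min/util.Max helpers):

	package main

	import "fmt"

	func main() {
		poolSize := 100
		maxWorkers := min(poolSize/2, max(10, poolSize/3)) // min(50, 33) = 33
		queueSize := max(20*maxWorkers, poolSize)          // max(660, 100) = 660
		queueCap := poolSize * 5                           // 500, since MaxActiveConns is unset
		fmt.Println(maxWorkers, min(queueSize, queueCap))  // 33 500
	}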
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go
new file mode 100644
index 000000000..ee3c3819c
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go
@@ -0,0 +1,60 @@
+package maintnotifications
+
+import (
+	"context"
+	"slices"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/push"
+)
+
+// LoggingHook is an example hook implementation that logs all notifications.
+type LoggingHook struct {
+	LogLevel int // 0=Error, 1=Warn, 2=Info, 3=Debug
+}
+
+// PreHook logs the notification before processing and allows modification.
+func (lh *LoggingHook) PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) {
+	if lh.LogLevel >= 2 { // Info level
+		// Log the notification type and content
+		connID := uint64(0)
+		if conn, ok := notificationCtx.Conn.(*pool.Conn); ok {
+			connID = conn.GetID()
+		}
+		seqID := int64(0)
+		if slices.Contains(maintenanceNotificationTypes, notificationType) {
+			// seqID is the second element in the notification array
+			if len(notification) > 1 {
+				if parsedSeqID, ok := notification[1].(int64); ok {
+					seqID = parsedSeqID
+				}
+			}
+		}
+		internal.Logger.Printf(ctx, logs.ProcessingNotification(connID, seqID, notificationType, notification))
+	}
+	return notification, true // Continue processing with unmodified notification
+}
+
+// PostHook logs the result after processing.
+func (lh *LoggingHook) PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) {
+	connID := uint64(0)
+	if conn, ok := notificationCtx.Conn.(*pool.Conn); ok {
+		connID = conn.GetID()
+	}
+	if result != nil && lh.LogLevel >= 1 { // Warning level
+		internal.Logger.Printf(ctx, logs.ProcessingNotificationFailed(connID, notificationType, result, notification))
+	} else if lh.LogLevel >= 3 { // Debug level
+		internal.Logger.Printf(ctx, logs.ProcessingNotificationSucceeded(connID, notificationType))
+	}
+}
+
+// NewLoggingHook creates a new logging hook with the specified log level.
+// Log levels: 0=Error, 1=Warn, 2=Info, 3=Debug
+func NewLoggingHook(logLevel int) *LoggingHook {
+	return &LoggingHook{LogLevel: logLevel}
+}
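
A sketch of registering the hook, assuming mgr is a *maintnotifications.Manager created via NewManager (manager.go below); wireLogging is a hypothetical helper name:

	package example

	import "github.com/redis/go-redis/v9/maintnotifications"

	// wireLogging registers a LoggingHook at level 2: every maintenance
	// notification is logged at Info, failures at Warn (see the level table above).
	func wireLogging(mgr *maintnotifications.Manager) {
		mgr.AddNotificationHook(maintnotifications.NewLoggingHook(2))
	}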
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go
new file mode 100644
index 000000000..775c163e1
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go
@@ -0,0 +1,320 @@
+package maintnotifications
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/interfaces"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/push"
+)
+
+// Push notification type constants for maintenance
+const (
+	NotificationMoving      = "MOVING"
+	NotificationMigrating   = "MIGRATING"
+	NotificationMigrated    = "MIGRATED"
+	NotificationFailingOver = "FAILING_OVER"
+	NotificationFailedOver  = "FAILED_OVER"
+)
+
+// maintenanceNotificationTypes contains all notification types that maintenance handles
+var maintenanceNotificationTypes = []string{
+	NotificationMoving,
+	NotificationMigrating,
+	NotificationMigrated,
+	NotificationFailingOver,
+	NotificationFailedOver,
+}
+
+// NotificationHook is called before and after notification processing.
+// PreHook can modify the notification and return false to skip processing;
+// PostHook is called after processing with the result.
+type NotificationHook interface {
+	PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool)
+	PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error)
+}
+
+// MovingOperationKey provides a unique key for tracking MOVING operations
+// that combines sequence ID with connection identifier to handle duplicate
+// sequence IDs across multiple connections to the same node.
+type MovingOperationKey struct {
+	SeqID  int64  // Sequence ID from MOVING notification
+	ConnID uint64 // Unique connection identifier
+}
+
+// String returns a string representation of the key for debugging
+func (k MovingOperationKey) String() string {
+	return fmt.Sprintf("seq:%d-conn:%d", k.SeqID, k.ConnID)
+}
+
+// Manager provides a simplified upgrade functionality with hooks and atomic state.
+type Manager struct {
+	client  interfaces.ClientInterface
+	config  *Config
+	options interfaces.OptionsInterface
+	pool    pool.Pooler
+
+	// MOVING operation tracking - using sync.Map for better concurrent performance
+	activeMovingOps sync.Map // map[MovingOperationKey]*MovingOperation
+
+	// Atomic state tracking - no locks needed for state queries
+	activeOperationCount atomic.Int64 // Number of active operations
+	closed               atomic.Bool  // Manager closed state
+
+	// Notification hooks for extensibility
+	hooks        []NotificationHook
+	hooksMu      sync.RWMutex // Protects hooks slice
+	poolHooksRef *PoolHook
+}
+
+// MovingOperation tracks an active MOVING operation.
+type MovingOperation struct {
+	SeqID       int64
+	NewEndpoint string
+	StartTime   time.Time
+	Deadline    time.Time
+}
+
+// NewManager creates a new simplified manager.
+func NewManager(client interfaces.ClientInterface, pool pool.Pooler, config *Config) (*Manager, error) {
+	if client == nil {
+		return nil, ErrInvalidClient
+	}
+
+	hm := &Manager{
+		client:  client,
+		pool:    pool,
+		options: client.GetOptions(),
+		config:  config.Clone(),
+		hooks:   make([]NotificationHook, 0),
+	}
+
+	// Set up push notification handling
+	if err := hm.setupPushNotifications(); err != nil {
+		return nil, err
+	}
+
+	return hm, nil
+}
+
+// InitPoolHook creates a pool hook with a custom dialer and registers it with the pool.
+func (hm *Manager) InitPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error)) {
+	poolHook := hm.createPoolHook(baseDialer)
+	hm.pool.AddPoolHook(poolHook)
+}
+
+// setupPushNotifications sets up push notification handling by registering with the client's processor.
+func (hm *Manager) setupPushNotifications() error {
+	processor := hm.client.GetPushProcessor()
+	if processor == nil {
+		return ErrInvalidClient // Client doesn't support push notifications
+	}
+
+	// Create our notification handler
+	handler := &NotificationHandler{manager: hm, operationsManager: hm}
+
+	// Register handlers for all upgrade notifications with the client's processor
+	for _, notificationType := range maintenanceNotificationTypes {
+		if err := processor.RegisterHandler(notificationType, handler, true); err != nil {
+			return errors.New(logs.FailedToRegisterHandler(notificationType, err))
+		}
+	}
+
+	return nil
+}
+
+// TrackMovingOperationWithConnID starts a new MOVING operation with a specific connection ID.
+func (hm *Manager) TrackMovingOperationWithConnID(ctx context.Context, newEndpoint string, deadline time.Time, seqID int64, connID uint64) error {
+	// Create composite key
+	key := MovingOperationKey{
+		SeqID:  seqID,
+		ConnID: connID,
+	}
+
+	// Create MOVING operation record
+	movingOp := &MovingOperation{
+		SeqID:       seqID,
+		NewEndpoint: newEndpoint,
+		StartTime:   time.Now(),
+		Deadline:    deadline,
+	}
+
+	// Use LoadOrStore for atomic check-and-set operation
+	if _, loaded := hm.activeMovingOps.LoadOrStore(key, movingOp); loaded {
+		// Duplicate MOVING notification, ignore
+		if internal.LogLevel.DebugOrAbove() {
+			internal.Logger.Printf(context.Background(), logs.DuplicateMovingOperation(connID, newEndpoint, seqID))
+		}
+		return nil
+	}
+	if internal.LogLevel.DebugOrAbove() {
+		internal.Logger.Printf(context.Background(), logs.TrackingMovingOperation(connID, newEndpoint, seqID))
+	}
+
+	// Increment active operation count atomically
+	hm.activeOperationCount.Add(1)
+
+	return nil
+}
+
+// UntrackOperationWithConnID completes a MOVING operation with a specific connection ID.
+func (hm *Manager) UntrackOperationWithConnID(seqID int64, connID uint64) {
+	// Create composite key
+	key := MovingOperationKey{
+		SeqID:  seqID,
+		ConnID: connID,
+	}
+
+	// Remove from active operations atomically
+	if _, loaded := hm.activeMovingOps.LoadAndDelete(key); loaded {
+		if internal.LogLevel.DebugOrAbove() {
+			internal.Logger.Printf(context.Background(), logs.UntrackingMovingOperation(connID, seqID))
+		}
+		// Decrement active operation count only if operation existed
+		hm.activeOperationCount.Add(-1)
+	} else {
+		if internal.LogLevel.DebugOrAbove() {
+			internal.Logger.Printf(context.Background(), logs.OperationNotTracked(connID, seqID))
+		}
+	}
+}
+
+// GetActiveMovingOperations returns active operations with composite keys.
+// WARNING: This method creates a new map and copies all operations on every call.
+// Use sparingly, especially in hot paths or high-frequency logging.
+func (hm *Manager) GetActiveMovingOperations() map[MovingOperationKey]*MovingOperation {
+	result := make(map[MovingOperationKey]*MovingOperation)
+
+	// Iterate over sync.Map to build result
+	hm.activeMovingOps.Range(func(key, value interface{}) bool {
+		k := key.(MovingOperationKey)
+		op := value.(*MovingOperation)
+
+		// Create a copy to avoid sharing references
+		result[k] = &MovingOperation{
+			SeqID:       op.SeqID,
+			NewEndpoint: op.NewEndpoint,
+			StartTime:   op.StartTime,
+			Deadline:    op.Deadline,
+		}
+		return true // Continue iteration
+	})
+
+	return result
+}
+
+// IsHandoffInProgress returns true if any handoff is in progress.
+// Uses atomic counter for lock-free operation.
+func (hm *Manager) IsHandoffInProgress() bool {
+	return hm.activeOperationCount.Load() > 0
+}
+
+// GetActiveOperationCount returns the number of active operations.
+// Uses atomic counter for lock-free operation.
+func (hm *Manager) GetActiveOperationCount() int64 {
+	return hm.activeOperationCount.Load()
+}
+
+// Close closes the manager.
+func (hm *Manager) Close() error {
+	// Use atomic operation for thread-safe close check
+	if !hm.closed.CompareAndSwap(false, true) {
+		return nil // Already closed
+	}
+
+	// Shutdown the pool hook if it exists
+	if hm.poolHooksRef != nil {
+		// Use a timeout to prevent hanging indefinitely
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+
+		err := hm.poolHooksRef.Shutdown(shutdownCtx)
+		if err != nil {
+			// Could not shut down the pool hook; revert the closed state so Close can be retried
+			hm.closed.Store(false)
+			return err
+		}
+		// Remove the pool hook from the pool
+		if hm.pool != nil {
+			hm.pool.RemovePoolHook(hm.poolHooksRef)
+		}
+	}
+
+	// Clear all active operations
+	hm.activeMovingOps.Range(func(key, value interface{}) bool {
+		hm.activeMovingOps.Delete(key)
+		return true
+	})
+
+	// Reset counter
+	hm.activeOperationCount.Store(0)
+
+	return nil
+}
+
+// GetState returns current state using atomic counter for lock-free operation.
+func (hm *Manager) GetState() State {
+	if hm.activeOperationCount.Load() > 0 {
+		return StateMoving
+	}
+	return StateIdle
+}
+
+// processPreHooks calls all pre-hooks and returns the modified notification and whether to continue processing.
+func (hm *Manager) processPreHooks(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) {
+	hm.hooksMu.RLock()
+	defer hm.hooksMu.RUnlock()
+
+	currentNotification := notification
+
+	for _, hook := range hm.hooks {
+		modifiedNotification, shouldContinue := hook.PreHook(ctx, notificationCtx, notificationType, currentNotification)
+		if !shouldContinue {
+			return modifiedNotification, false
+		}
+		currentNotification = modifiedNotification
+	}
+
+	return currentNotification, true
+}
+
+// processPostHooks calls all post-hooks with the processing result.
+func (hm *Manager) processPostHooks(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) {
+	hm.hooksMu.RLock()
+	defer hm.hooksMu.RUnlock()
+
+	for _, hook := range hm.hooks {
+		hook.PostHook(ctx, notificationCtx, notificationType, notification, result)
+	}
+}
+
+// createPoolHook creates a pool hook with this manager already set.
+func (hm *Manager) createPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error)) *PoolHook {
+	if hm.poolHooksRef != nil {
+		return hm.poolHooksRef
+	}
+	// Get pool size from client options for better worker defaults
+	poolSize := 0
+	if hm.options != nil {
+		poolSize = hm.options.GetPoolSize()
+	}
+
+	hm.poolHooksRef = NewPoolHookWithPoolSize(baseDialer, hm.options.GetNetwork(), hm.config, hm, poolSize)
+	hm.poolHooksRef.SetPool(hm.pool)
+
+	return hm.poolHooksRef
+}
+
+func (hm *Manager) AddNotificationHook(notificationHook NotificationHook) {
+	hm.hooksMu.Lock()
+	defer hm.hooksMu.Unlock()
+	hm.hooks = append(hm.hooks, notificationHook)
+}
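
A short monitoring sketch built on the accessors above; reportHandoffs is a hypothetical helper, and the copy made by GetActiveMovingOperations is fine for occasional reporting:

	package example

	import (
		"fmt"

		"github.com/redis/go-redis/v9/maintnotifications"
	)

	// reportHandoffs prints any in-flight MOVING operations.
	func reportHandoffs(mgr *maintnotifications.Manager) {
		if !mgr.IsHandoffInProgress() {
			return
		}
		for key, op := range mgr.GetActiveMovingOperations() {
			fmt.Printf("%s -> %s (deadline %s)\n", key, op.NewEndpoint, op.Deadline)
		}
	}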
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go
new file mode 100644
index 000000000..9ea0558bf
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go
@@ -0,0 +1,182 @@
+package maintnotifications
+
+import (
+	"context"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+)
+
+// OperationsManagerInterface defines the interface for completing handoff operations
+type OperationsManagerInterface interface {
+	TrackMovingOperationWithConnID(ctx context.Context, newEndpoint string, deadline time.Time, seqID int64, connID uint64) error
+	UntrackOperationWithConnID(seqID int64, connID uint64)
+}
+
+// HandoffRequest represents a request to handoff a connection to a new endpoint
+type HandoffRequest struct {
+	Conn     *pool.Conn
+	ConnID   uint64 // Unique connection identifier
+	Endpoint string
+	SeqID    int64
+	Pool     pool.Pooler // Pool to remove connection from on failure
+}
+
+// PoolHook implements pool.PoolHook for Redis-specific connection handling
+// with maintenance notifications support.
+type PoolHook struct {
+	// Base dialer for creating connections to new endpoints during handoffs
+	// args are network and address
+	baseDialer func(context.Context, string, string) (net.Conn, error)
+
+	// Network type (e.g., "tcp", "unix")
+	network string
+
+	// Worker manager for background handoff processing
+	workerManager *handoffWorkerManager
+
+	// Configuration for the maintenance notifications
+	config *Config
+
+	// Operations manager interface for operation completion tracking
+	operationsManager OperationsManagerInterface
+
+	// Pool interface for removing connections on handoff failure
+	pool pool.Pooler
+}
+
+// NewPoolHook creates a new pool hook
+func NewPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error), network string, config *Config, operationsManager OperationsManagerInterface) *PoolHook {
+	return NewPoolHookWithPoolSize(baseDialer, network, config, operationsManager, 0)
+}
+
+// NewPoolHookWithPoolSize creates a new pool hook with pool size for better worker defaults
+func NewPoolHookWithPoolSize(baseDialer func(context.Context, string, string) (net.Conn, error), network string, config *Config, operationsManager OperationsManagerInterface, poolSize int) *PoolHook {
+	// Apply defaults if config is nil (ApplyDefaultsWithPoolSize handles the nil receiver)
+	if config == nil {
+		config = config.ApplyDefaultsWithPoolSize(poolSize)
+	}
+
+	ph := &PoolHook{
+		// baseDialer is used to create connections to new endpoints during handoffs
+		baseDialer:        baseDialer,
+		network:           network,
+		config:            config,
+		operationsManager: operationsManager,
+	}
+
+	// Create worker manager
+	ph.workerManager = newHandoffWorkerManager(config, ph)
+
+	return ph
+}
+
+// SetPool sets the pool interface for removing connections on handoff failure
+func (ph *PoolHook) SetPool(pooler pool.Pooler) {
+	ph.pool = pooler
+}
+
+// GetCurrentWorkers returns the current number of active workers (for testing)
+func (ph *PoolHook) GetCurrentWorkers() int {
+	return ph.workerManager.getCurrentWorkers()
+}
+
+// IsHandoffPending returns true if the given connection has a pending handoff
+func (ph *PoolHook) IsHandoffPending(conn *pool.Conn) bool {
+	return ph.workerManager.isHandoffPending(conn)
+}
+
+// GetPendingMap returns the pending map for testing purposes
+func (ph *PoolHook) GetPendingMap() *sync.Map {
+	return ph.workerManager.getPendingMap()
+}
+
+// GetMaxWorkers returns the max workers for testing purposes
+func (ph *PoolHook) GetMaxWorkers() int {
+	return ph.workerManager.getMaxWorkers()
+}
+
+// GetHandoffQueue returns the handoff queue for testing purposes
+func (ph *PoolHook) GetHandoffQueue() chan HandoffRequest {
+	return ph.workerManager.getHandoffQueue()
+}
+
+// GetCircuitBreakerStats returns circuit breaker statistics for monitoring
+func (ph *PoolHook) GetCircuitBreakerStats() []CircuitBreakerStats {
+	return ph.workerManager.getCircuitBreakerStats()
+}
+
+// ResetCircuitBreakers resets all circuit breakers (useful for testing)
+func (ph *PoolHook) ResetCircuitBreakers() {
+	ph.workerManager.resetCircuitBreakers()
+}
+
+// OnGet is called when a connection is retrieved from the pool
+func (ph *PoolHook) OnGet(_ context.Context, conn *pool.Conn, _ bool) (accept bool, err error) {
+	// Check if connection is marked for handoff
+	// This prevents using connections that have received MOVING notifications
+	if conn.ShouldHandoff() {
+		return false, ErrConnectionMarkedForHandoffWithState
+	}
+
+	// Check if connection is usable (not in UNUSABLE or CLOSED state)
+	// This ensures we don't return connections that are currently being handed off or re-authenticated.
+	if !conn.IsUsable() {
+		return false, ErrConnectionMarkedForHandoff
+	}
+
+	return true, nil
+}
+
+// OnPut is called when a connection is returned to the pool
+func (ph *PoolHook) OnPut(ctx context.Context, conn *pool.Conn) (shouldPool bool, shouldRemove bool, err error) {
+	// first check if we should handoff for faster rejection
+	if !conn.ShouldHandoff() {
+		// Default behavior (no handoff): pool the connection
+		return true, false, nil
+	}
+
+	// check pending handoff to not queue the same connection twice
+	if ph.workerManager.isHandoffPending(conn) {
+		// Default behavior (pending handoff): pool the connection
+		return true, false, nil
+	}
+
+	if err := ph.workerManager.queueHandoff(conn); err != nil {
+		// Failed to queue handoff, remove the connection
+		internal.Logger.Printf(ctx, logs.FailedToQueueHandoff(conn.GetID(), err))
+		// Don't pool, remove connection, no error to caller
+		return false, true, nil
+	}
+
+	// Check if handoff was already processed by a worker before we can mark it as queued
+	if !conn.ShouldHandoff() {
+		// Handoff was already processed - this is normal and the connection should be pooled
+		return true, false, nil
+	}
+
+	if err := conn.MarkQueuedForHandoff(); err != nil {
+		// If marking fails, check if handoff was processed in the meantime
+		if !conn.ShouldHandoff() {
+			// Handoff was processed - this is normal, pool the connection
+			return true, false, nil
+		}
+		// Other error - remove the connection
+		return false, true, nil
+	}
+	internal.Logger.Printf(ctx, logs.MarkedForHandoff(conn.GetID()))
+	return true, false, nil
+}
+
+func (ph *PoolHook) OnRemove(_ context.Context, _ *pool.Conn, _ error) {
+	// Not used
+}
+
+// Shutdown gracefully shuts down the processor, waiting for workers to complete
+func (ph *PoolHook) Shutdown(ctx context.Context) error {
+	return ph.workerManager.shutdownWorkers(ctx)
+}
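
OnGet/OnPut are invoked by the pool itself, so external callers rarely touch them directly. Inside go-redis (internal/pool is not importable from other modules) the wiring amounts to roughly the following; netDialer, cfg, opsManager, and connPool are assumed placeholder values:

	// hook creation with an explicit pool size for worker defaults
	hook := NewPoolHookWithPoolSize(netDialer, "tcp", cfg, opsManager, 100)
	hook.SetPool(connPool)
	connPool.AddPoolHook(hook)
	// ... on teardown:
	_ = hook.Shutdown(context.Background())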
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go
new file mode 100644
index 000000000..937b4ae82
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go
@@ -0,0 +1,282 @@
+package maintnotifications
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/redis/go-redis/v9/internal"
+	"github.com/redis/go-redis/v9/internal/maintnotifications/logs"
+	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/push"
+)
+
+// NotificationHandler handles push notifications for the simplified manager.
+type NotificationHandler struct {
+	manager           *Manager
+	operationsManager OperationsManagerInterface
+}
+
+// HandlePushNotification processes push notifications with hook support.
+func (snh *NotificationHandler) HandlePushNotification(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	if len(notification) == 0 {
+		internal.Logger.Printf(ctx, logs.InvalidNotificationFormat(notification))
+		return ErrInvalidNotification
+	}
+
+	notificationType, ok := notification[0].(string)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidNotificationTypeFormat(notification[0]))
+		return ErrInvalidNotification
+	}
+
+	// Process pre-hooks - they can modify the notification or skip processing
+	modifiedNotification, shouldContinue := snh.manager.processPreHooks(ctx, handlerCtx, notificationType, notification)
+	if !shouldContinue {
+		return nil // Hooks decided to skip processing
+	}
+
+	var err error
+	switch notificationType {
+	case NotificationMoving:
+		err = snh.handleMoving(ctx, handlerCtx, modifiedNotification)
+	case NotificationMigrating:
+		err = snh.handleMigrating(ctx, handlerCtx, modifiedNotification)
+	case NotificationMigrated:
+		err = snh.handleMigrated(ctx, handlerCtx, modifiedNotification)
+	case NotificationFailingOver:
+		err = snh.handleFailingOver(ctx, handlerCtx, modifiedNotification)
+	case NotificationFailedOver:
+		err = snh.handleFailedOver(ctx, handlerCtx, modifiedNotification)
+	default:
+		// Ignore other notification types (e.g., pub/sub messages)
+		err = nil
+	}
+
+	// Process post-hooks with the result
+	snh.manager.processPostHooks(ctx, handlerCtx, notificationType, modifiedNotification, err)
+
+	return err
+}
+
+// handleMoving processes MOVING notifications.
+// ["MOVING", seqNum, timeS, endpoint] - per-connection handoff
+func (snh *NotificationHandler) handleMoving(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	if len(notification) < 3 {
+		internal.Logger.Printf(ctx, logs.InvalidNotification("MOVING", notification))
+		return ErrInvalidNotification
+	}
+	seqID, ok := notification[1].(int64)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidSeqIDInMovingNotification(notification[1]))
+		return ErrInvalidNotification
+	}
+
+	// Extract timeS
+	timeS, ok := notification[2].(int64)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidTimeSInMovingNotification(notification[2]))
+		return ErrInvalidNotification
+	}
+
+	newEndpoint := ""
+	if len(notification) > 3 {
+		// Extract new endpoint
+		newEndpoint, ok = notification[3].(string)
+		if !ok {
+			stringified := fmt.Sprintf("%v", notification[3])
+			// notification[3] may be nil or the RESP NULL value, which is valid
+			if notification[3] == nil || stringified == internal.RedisNull {
+				newEndpoint = ""
+			} else {
+				internal.Logger.Printf(ctx, logs.InvalidNewEndpointInMovingNotification(notification[3]))
+				return ErrInvalidNotification
+			}
+		}
+	}
+
+	// Get the connection that received this notification
+	conn := handlerCtx.Conn
+	if conn == nil {
+		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MOVING"))
+		return ErrInvalidNotification
+	}
+
+	// Type assert to get the underlying pool connection
+	var poolConn *pool.Conn
+	if pc, ok := conn.(*pool.Conn); ok {
+		poolConn = pc
+	} else {
+		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MOVING", conn, handlerCtx))
+		return ErrInvalidNotification
+	}
+
+	// If the connection is closed or not pooled, we can ignore the notification;
+	// this connection won't be remembered by the pool and will be garbage collected.
+	// Keep pubsub connections around since they are not pooled but are long-lived
+	// and should be allowed to handoff (the pubsub instance will reconnect and change
+	// the underlying *pool.Conn)
+	if (poolConn.IsClosed() || !poolConn.IsPooled()) && !poolConn.IsPubSub() {
+		return nil
+	}
+
+	deadline := time.Now().Add(time.Duration(timeS) * time.Second)
+	// If newEndpoint is empty, we should schedule a handoff to the current endpoint in timeS/2 seconds
+	if newEndpoint == "" || newEndpoint == internal.RedisNull {
+		if internal.LogLevel.DebugOrAbove() {
+			internal.Logger.Printf(ctx, logs.SchedulingHandoffToCurrentEndpoint(poolConn.GetID(), float64(timeS)/2))
+		}
+		// same as current endpoint
+		newEndpoint = snh.manager.options.GetAddr()
+		// delay the handoff for timeS/2 seconds to the same endpoint
+		// do this in a goroutine to avoid blocking the notification handler
+		// NOTE: This timer is started while parsing the notification, so the connection is not marked for handoff
+		// and there should be no possibility of a race condition or double handoff.
+		time.AfterFunc(time.Duration(timeS/2)*time.Second, func() {
+			if poolConn == nil || poolConn.IsClosed() {
+				return
+			}
+			if err := snh.markConnForHandoff(poolConn, newEndpoint, seqID, deadline); err != nil {
+				// Log error but don't fail the goroutine - use background context since original may be cancelled
+				internal.Logger.Printf(context.Background(), logs.FailedToMarkForHandoff(poolConn.GetID(), err))
+			}
+		})
+		return nil
+	}
+
+	return snh.markConnForHandoff(poolConn, newEndpoint, seqID, deadline)
+}
+
+func (snh *NotificationHandler) markConnForHandoff(conn *pool.Conn, newEndpoint string, seqID int64, deadline time.Time) error {
+	if err := conn.MarkForHandoff(newEndpoint, seqID); err != nil {
+		internal.Logger.Printf(context.Background(), logs.FailedToMarkForHandoff(conn.GetID(), err))
+		// Connection is already marked for handoff, which is acceptable
+		// This can happen if multiple MOVING notifications are received for the same connection
+		return nil
+	}
+	// Optionally track in the operations manager
+	if snh.operationsManager != nil {
+		connID := conn.GetID()
+		// Track the operation (ignore errors since this is optional)
+		_ = snh.operationsManager.TrackMovingOperationWithConnID(context.Background(), newEndpoint, deadline, seqID, connID)
+	} else {
+		return errors.New(logs.ManagerNotInitialized())
+	}
+	return nil
+}
+
+// handleMigrating processes MIGRATING notifications.
+func (snh *NotificationHandler) handleMigrating(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	// MIGRATING notifications indicate that a connection is about to be migrated
+	// Apply relaxed timeouts to the specific connection that received this notification
+	if len(notification) < 2 {
+		internal.Logger.Printf(ctx, logs.InvalidNotification("MIGRATING", notification))
+		return ErrInvalidNotification
+	}
+
+	if handlerCtx.Conn == nil {
+		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MIGRATING"))
+		return ErrInvalidNotification
+	}
+
+	conn, ok := handlerCtx.Conn.(*pool.Conn)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MIGRATING", handlerCtx.Conn, handlerCtx))
+		return ErrInvalidNotification
+	}
+
+	// Apply relaxed timeout to this specific connection
+	if internal.LogLevel.InfoOrAbove() {
+		internal.Logger.Printf(ctx, logs.RelaxedTimeoutDueToNotification(conn.GetID(), "MIGRATING", snh.manager.config.RelaxedTimeout))
+	}
+	conn.SetRelaxedTimeout(snh.manager.config.RelaxedTimeout, snh.manager.config.RelaxedTimeout)
+	return nil
+}
+
+// handleMigrated processes MIGRATED notifications.
+func (snh *NotificationHandler) handleMigrated(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	// MIGRATED notifications indicate that a connection migration has completed
+	// Restore normal timeouts for the specific connection that received this notification
+	if len(notification) < 2 {
+		internal.Logger.Printf(ctx, logs.InvalidNotification("MIGRATED", notification))
+		return ErrInvalidNotification
+	}
+
+	if handlerCtx.Conn == nil {
+		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MIGRATED"))
+		return ErrInvalidNotification
+	}
+
+	conn, ok := handlerCtx.Conn.(*pool.Conn)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MIGRATED", handlerCtx.Conn, handlerCtx))
+		return ErrInvalidNotification
+	}
+
+	// Clear relaxed timeout for this specific connection
+	if internal.LogLevel.InfoOrAbove() {
+		connID := conn.GetID()
+		internal.Logger.Printf(ctx, logs.UnrelaxedTimeout(connID))
+	}
+	conn.ClearRelaxedTimeout()
+	return nil
+}
+
+// handleFailingOver processes FAILING_OVER notifications.
+func (snh *NotificationHandler) handleFailingOver(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	// FAILING_OVER notifications indicate that a connection is about to failover
+	// Apply relaxed timeouts to the specific connection that received this notification
+	if len(notification) < 2 {
+		internal.Logger.Printf(ctx, logs.InvalidNotification("FAILING_OVER", notification))
+		return ErrInvalidNotification
+	}
+
+	if handlerCtx.Conn == nil {
+		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("FAILING_OVER"))
+		return ErrInvalidNotification
+	}
+
+	conn, ok := handlerCtx.Conn.(*pool.Conn)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("FAILING_OVER", handlerCtx.Conn, handlerCtx))
+		return ErrInvalidNotification
+	}
+
+	// Apply relaxed timeout to this specific connection
+	if internal.LogLevel.InfoOrAbove() {
+		connID := conn.GetID()
+		internal.Logger.Printf(ctx, logs.RelaxedTimeoutDueToNotification(connID, "FAILING_OVER", snh.manager.config.RelaxedTimeout))
+	}
+	conn.SetRelaxedTimeout(snh.manager.config.RelaxedTimeout, snh.manager.config.RelaxedTimeout)
+	return nil
+}
+
+// handleFailedOver processes FAILED_OVER notifications.
+func (snh *NotificationHandler) handleFailedOver(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
+	// FAILED_OVER notifications indicate that a connection failover has completed
+	// Restore normal timeouts for the specific connection that received this notification
+	if len(notification) < 2 {
+		internal.Logger.Printf(ctx, logs.InvalidNotification("FAILED_OVER", notification))
+		return ErrInvalidNotification
+	}
+
+	if handlerCtx.Conn == nil {
+		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("FAILED_OVER"))
+		return ErrInvalidNotification
+	}
+
+	conn, ok := handlerCtx.Conn.(*pool.Conn)
+	if !ok {
+		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("FAILED_OVER", handlerCtx.Conn, handlerCtx))
+		return ErrInvalidNotification
+	}
+
+	// Clear relaxed timeout for this specific connection
+	if internal.LogLevel.InfoOrAbove() {
+		connID := conn.GetID()
+		internal.Logger.Printf(ctx, logs.UnrelaxedTimeout(connID))
+	}
+	conn.ClearRelaxedTimeout()
+	return nil
+}
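
For orientation, the wire shape handleMoving expects after RESP3 decoding, shown as a Go literal; the concrete values are made up:

	package example

	// movingPayload mirrors ["MOVING", seqNum, timeS, endpoint]; the endpoint
	// element may be absent or RESP NULL, in which case a handoff to the
	// current endpoint is scheduled after timeS/2 seconds, as handled above.
	var movingPayload = []interface{}{
		"MOVING",        // notification[0]: type
		int64(42),       // notification[1]: seqID
		int64(30),       // notification[2]: timeS (seconds until cutover)
		"10.0.0.7:6379", // notification[3]: new endpoint
	}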
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go
new file mode 100644
index 000000000..8180bcd97
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go
@@ -0,0 +1,24 @@
+package maintnotifications
+
+// State represents the current state of a maintenance operation
+type State int
+
+const (
+	// StateIdle indicates no upgrade is in progress
+	StateIdle State = iota
+
+	// StateMoving indicates a connection handoff is in progress
+	StateMoving
+)
+
+// String returns a string representation of the state.
+func (s State) String() string {
+	switch s {
+	case StateIdle:
+		return "idle"
+	case StateMoving:
+		return "moving"
+	default:
+		return "unknown"
+	}
+}
diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go
index 020e76b4a..9773e86f7 100644
--- a/vendor/github.com/redis/go-redis/v9/options.go
+++ b/vendor/github.com/redis/go-redis/v9/options.go
@@ -13,7 +13,12 @@ import (
 	"strings"
 	"time"
 
+	"github.com/redis/go-redis/v9/auth"
 	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/internal/proto"
+	"github.com/redis/go-redis/v9/internal/util"
+	"github.com/redis/go-redis/v9/maintnotifications"
+	"github.com/redis/go-redis/v9/push"
 )
 
 // Limiter is the interface of a rate limiter or a circuit breaker.
@@ -29,10 +34,12 @@ type Limiter interface {
 
 // Options keeps the settings to set up redis connection.
 type Options struct {
-	// The network type, either tcp or unix.
-	// Default is tcp.
+	// Network type, either tcp or unix.
+	//
+	// default: tcp.
 	Network string
-	// host:port address.
+
+	// Addr is the address formatted as host:port
 	Addr string
 
 	// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
@@ -46,17 +53,21 @@ type Options struct {
 	OnConnect func(ctx context.Context, cn *Conn) error
 
 	// Protocol 2 or 3. Use the version to negotiate RESP version with redis-server.
-	// Default is 3.
+	//
+	// default: 3.
 	Protocol int
-	// Use the specified Username to authenticate the current connection
+
+	// Username is used to authenticate the current connection
 	// with one of the connections defined in the ACL list when connecting
 	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
 	Username string
-	// Optional password. Must match the password specified in the
-	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+
+	// Password is an optional password. Must match the password specified in the
+	// `requirepass` server configuration option (if connecting to a Redis 5.0 instance, or lower),
 	// or the User Password when connecting to a Redis 6.0 instance, or greater,
 	// that is using the Redis ACL system.
 	Password string
+
 	// CredentialsProvider allows the username and password to be updated
 	// before reconnecting. It should return the current username and password.
 	CredentialsProvider func() (username string, password string)
@@ -67,85 +78,155 @@ type Options struct {
 	// There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider.
 	CredentialsProviderContext func(ctx context.Context) (username string, password string, err error)
 
-	// Database to be selected after connecting to the server.
+	// StreamingCredentialsProvider is used to retrieve the credentials
+	// for the connection from an external source. Those credentials may change
+	// during the connection lifetime. This is useful for managed identity
+	// scenarios where the credentials are retrieved from an external source.
+	//
+	// Currently, this is a placeholder for the future implementation.
+	StreamingCredentialsProvider auth.StreamingCredentialsProvider
+
+	// DB is the database to be selected after connecting to the server.
 	DB int
 
-	// Maximum number of retries before giving up.
-	// Default is 3 retries; -1 (not 0) disables retries.
+	// MaxRetries is the maximum number of retries before giving up.
+	// -1 (not 0) disables retries.
+	//
+	// default: 3 retries
 	MaxRetries int
-	// Minimum backoff between each retry.
-	// Default is 8 milliseconds; -1 disables backoff.
+
+	// MinRetryBackoff is the minimum backoff between each retry.
+	// -1 disables backoff.
+	//
+	// default: 8 milliseconds
 	MinRetryBackoff time.Duration
-	// Maximum backoff between each retry.
-	// Default is 512 milliseconds; -1 disables backoff.
+
+	// MaxRetryBackoff is the maximum backoff between each retry.
+	// -1 disables backoff.
+	// default: 512 milliseconds
 	MaxRetryBackoff time.Duration
 
-	// Dial timeout for establishing new connections.
-	// Default is 5 seconds.
+	// DialTimeout for establishing new connections.
+	//
+	// default: 5 seconds
 	DialTimeout time.Duration
-	// Timeout for socket reads. If reached, commands will fail
+
+	// DialerRetries is the maximum number of retry attempts when dialing fails.
+	//
+	// default: 5
+	DialerRetries int
+
+	// DialerRetryTimeout is the backoff duration between retry attempts.
+	//
+	// default: 100 milliseconds
+	DialerRetryTimeout time.Duration
+
+	// ReadTimeout for socket reads. If reached, commands will fail
 	// with a timeout instead of blocking. Supported values:
-	//   - `0` - default timeout (3 seconds).
-	//   - `-1` - no timeout (block indefinitely).
-	//   - `-2` - disables SetReadDeadline calls completely.
+	//
+	//	- `-1` - no timeout (block indefinitely).
+	//	- `-2` - disables SetReadDeadline calls completely.
+	//
+	// default: 3 seconds
 	ReadTimeout time.Duration
-	// Timeout for socket writes. If reached, commands will fail
+
+	// WriteTimeout for socket writes. If reached, commands will fail
 	// with a timeout instead of blocking. Supported values:
-	//   - `0` - default timeout (3 seconds).
-	//   - `-1` - no timeout (block indefinitely).
-	//   - `-2` - disables SetWriteDeadline calls completely.
+	//
+	//	- `-1` - no timeout (block indefinitely).
+	//	- `-2` - disables SetWriteDeadline calls completely.
+	//
+	// default: 3 seconds
 	WriteTimeout time.Duration
+
 	// ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines.
 	// See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts
 	ContextTimeoutEnabled bool
 
-	// Type of connection pool.
-	// true for FIFO pool, false for LIFO pool.
+	// ReadBufferSize is the size of the bufio.Reader buffer for each connection.
+	// Larger buffers can improve performance for commands that return large responses.
+	// Smaller buffers can improve memory usage for larger pools.
+	//
+	// default: 32KiB (32768 bytes)
+	ReadBufferSize int
+
+	// WriteBufferSize is the size of the bufio.Writer buffer for each connection.
+	// Larger buffers can improve performance for large pipelines and commands with many arguments.
+	// Smaller buffers can improve memory usage for larger pools.
+	//
+	// default: 32KiB (32768 bytes)
+	WriteBufferSize int
+
+	// PoolFIFO type of connection pool.
+	//
+	// - true for FIFO pool
+	// - false for LIFO pool.
+	//
 	// Note that FIFO has slightly higher overhead compared to LIFO,
 	// but it helps closing idle connections faster reducing the pool size.
+	// default: false
 	PoolFIFO bool
-	// Base number of socket connections.
+
+	// PoolSize is the base number of socket connections.
 	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
 	// If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize,
 	// you can limit it through MaxActiveConns
+	//
+	// default: 10 * runtime.GOMAXPROCS(0)
 	PoolSize int
-	// Amount of time client waits for connection if all connections
+
+	// MaxConcurrentDials is the maximum number of concurrent connection creation goroutines.
+	// If <= 0, defaults to PoolSize. If > PoolSize, it will be capped at PoolSize.
+	MaxConcurrentDials int
+
+	// PoolTimeout is the amount of time client waits for connection if all connections
 	// are busy before returning an error.
-	// Default is ReadTimeout + 1 second.
+	//
+	// default: ReadTimeout + 1 second
 	PoolTimeout time.Duration
-	// Minimum number of idle connections which is useful when establishing
-	// new connection is slow.
-	// Default is 0. the idle connections are not closed by default.
+
+	// MinIdleConns is the minimum number of idle connections which is useful when establishing
+	// new connection is slow. The idle connections are not closed by default.
+	//
+	// default: 0
 	MinIdleConns int
-	// Maximum number of idle connections.
-	// Default is 0. the idle connections are not closed by default.
+
+	// MaxIdleConns is the maximum number of idle connections.
+	// The idle connections are not closed by default.
+	//
+	// default: 0
 	MaxIdleConns int
-	// Maximum number of connections allocated by the pool at a given time.
+
+	// MaxActiveConns is the maximum number of connections allocated by the pool at a given time.
 	// When zero, there is no limit on the number of connections in the pool.
+	// If the pool is full, the next call to Get() will block until a connection is released.
 	MaxActiveConns int
+
 	// ConnMaxIdleTime is the maximum amount of time a connection may be idle.
 	// Should be less than server's timeout.
 	//
 	// Expired connections may be closed lazily before reuse.
 	// If d <= 0, connections are not closed due to a connection's idle time.
+	// -1 disables idle timeout check.
 	//
-	// Default is 30 minutes.
-1 disables idle timeout check. + // default: 30 minutes ConnMaxIdleTime time.Duration + // ConnMaxLifetime is the maximum amount of time a connection may be reused. // // Expired connections may be closed lazily before reuse. // If <= 0, connections are not closed due to a connection's age. // - // Default is to not close idle connections. + // default: 0 ConnMaxLifetime time.Duration - // TLS Config to use. When set, TLS will be negotiated. + // TLSConfig to use. When set, TLS will be negotiated. TLSConfig *tls.Config // Limiter interface used to implement circuit breaker or rate limiter. Limiter Limiter - // Enables read only queries on slave/follower nodes. + // readOnly enables read only queries on slave/follower nodes. readOnly bool // DisableIndentity - Disable set-lib on connect. @@ -161,7 +242,31 @@ type Options struct { DisableIdentity bool // Add suffix to client name. Default is empty. + // IdentitySuffix - add suffix to client name. IdentitySuffix string + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + // When unstable mode is enabled, the client will use RESP3 protocol and only be able to use RawResult + UnstableResp3 bool + + // Push notifications are always enabled for RESP3 connections (Protocol: 3) + // and are not available for RESP2 connections. No configuration option is needed. + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Default is 15 seconds. + FailingTimeoutSeconds int + + // MaintNotificationsConfig provides custom configuration for maintnotifications. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // cluster upgrade notifications gracefully and manage connection/pool state + // transitions seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications are in "auto" mode and will be enabled if the server supports it. 
+ MaintNotificationsConfig *maintnotifications.Config } func (opt *Options) init() { @@ -175,15 +280,35 @@ func (opt *Options) init() { opt.Network = "tcp" } } + if opt.Protocol < 2 { + opt.Protocol = 3 + } if opt.DialTimeout == 0 { opt.DialTimeout = 5 * time.Second } + if opt.DialerRetries == 0 { + opt.DialerRetries = 5 + } + if opt.DialerRetryTimeout == 0 { + opt.DialerRetryTimeout = 100 * time.Millisecond + } if opt.Dialer == nil { opt.Dialer = NewDialer(opt) } if opt.PoolSize == 0 { opt.PoolSize = 10 * runtime.GOMAXPROCS(0) } + if opt.MaxConcurrentDials <= 0 { + opt.MaxConcurrentDials = opt.PoolSize + } else if opt.MaxConcurrentDials > opt.PoolSize { + opt.MaxConcurrentDials = opt.PoolSize + } + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } switch opt.ReadTimeout { case -2: opt.ReadTimeout = -1 @@ -211,9 +336,10 @@ func (opt *Options) init() { opt.ConnMaxIdleTime = 30 * time.Minute } - if opt.MaxRetries == -1 { + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -228,13 +354,40 @@ func (opt *Options) init() { case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } + + if opt.FailingTimeoutSeconds == 0 { + opt.FailingTimeoutSeconds = 15 + } + + opt.MaintNotificationsConfig = opt.MaintNotificationsConfig.ApplyDefaultsWithPoolConfig(opt.PoolSize, opt.MaxActiveConns) + + // auto-detect endpoint type if not specified + endpointType := opt.MaintNotificationsConfig.EndpointType + if endpointType == "" || endpointType == maintnotifications.EndpointTypeAuto { + // Auto-detect endpoint type if not specified + endpointType = maintnotifications.DetectEndpointType(opt.Addr, opt.TLSConfig != nil) + } + opt.MaintNotificationsConfig.EndpointType = endpointType } func (opt *Options) clone() *Options { clone := *opt + + // Deep clone MaintNotificationsConfig to avoid sharing between clients + if opt.MaintNotificationsConfig != nil { + configClone := *opt.MaintNotificationsConfig + clone.MaintNotificationsConfig = &configClone + } + return &clone } +// NewDialer returns a function that will be used as the default dialer +// when none is specified in Options.Dialer. +func (opt *Options) NewDialer() func(context.Context, string, string) (net.Conn, error) { + return NewDialer(opt) +} + // NewDialer returns a function that will be used as the default dialer // when none is specified in Options.Dialer. 
func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) { @@ -273,6 +426,7 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er // URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error +// - use "skip_verify=true" to ignore TLS certificate validation // // Examples: // @@ -480,6 +634,7 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) { o.MinIdleConns = q.int("min_idle_conns") o.MaxIdleConns = q.int("max_idle_conns") o.MaxActiveConns = q.int("max_active_conns") + o.MaxConcurrentDials = q.int("max_concurrent_dials") if q.has("conn_max_idle_time") { o.ConnMaxIdleTime = q.duration("conn_max_idle_time") } else { @@ -493,6 +648,9 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) { if q.err != nil { return nil, q.err } + if o.TLSConfig != nil && q.has("skip_verify") { + o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") + } // any parameters left? if r := q.remaining(); len(r) > 0 { @@ -516,18 +674,86 @@ func getUserPassword(u *url.URL) (string, string) { func newConnPool( opt *Options, dialer func(ctx context.Context, network, addr string) (net.Conn, error), -) *pool.ConnPool { +) (*pool.ConnPool, error) { + poolSize, err := util.SafeIntToInt32(opt.PoolSize, "PoolSize") + if err != nil { + return nil, err + } + + minIdleConns, err := util.SafeIntToInt32(opt.MinIdleConns, "MinIdleConns") + if err != nil { + return nil, err + } + + maxIdleConns, err := util.SafeIntToInt32(opt.MaxIdleConns, "MaxIdleConns") + if err != nil { + return nil, err + } + + maxActiveConns, err := util.SafeIntToInt32(opt.MaxActiveConns, "MaxActiveConns") + if err != nil { + return nil, err + } + return pool.NewConnPool(&pool.Options{ Dialer: func(ctx context.Context) (net.Conn, error) { return dialer(ctx, opt.Network, opt.Addr) }, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, - }) + PoolFIFO: opt.PoolFIFO, + PoolSize: poolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + MinIdleConns: minIdleConns, + MaxIdleConns: maxIdleConns, + MaxActiveConns: maxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + PushNotificationsEnabled: opt.Protocol == 3, + }), nil +} + +func newPubSubPool(opt *Options, dialer func(ctx context.Context, network, addr string) (net.Conn, error), +) (*pool.PubSubPool, error) { + poolSize, err := util.SafeIntToInt32(opt.PoolSize, "PoolSize") + if err != nil { + return nil, err + } + + minIdleConns, err := util.SafeIntToInt32(opt.MinIdleConns, "MinIdleConns") + if err != nil { + return nil, err + } + + maxIdleConns, err := util.SafeIntToInt32(opt.MaxIdleConns, "MaxIdleConns") + if err != nil { + return nil, err + } + + maxActiveConns, err := util.SafeIntToInt32(opt.MaxActiveConns, "MaxActiveConns") + if err != nil { + return nil, err + } + + return pool.NewPubSubPool(&pool.Options{ + PoolFIFO: opt.PoolFIFO, + PoolSize: poolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + 
DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + MinIdleConns: minIdleConns, + MaxIdleConns: maxIdleConns, + MaxActiveConns: maxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ReadBufferSize: 32 * 1024, + WriteBufferSize: 32 * 1024, + PushNotificationsEnabled: opt.Protocol == 3, + }, dialer), nil } diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go index 5ddedf7b4..7925d2c60 100644 --- a/vendor/github.com/redis/go-redis/v9/osscluster.go +++ b/vendor/github.com/redis/go-redis/v9/osscluster.go @@ -14,11 +14,18 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/rand" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" +) + +const ( + minLatencyMeasurementInterval = 10 * time.Second ) var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") @@ -33,6 +40,7 @@ type ClusterOptions struct { ClientName string // NewClient creates a cluster node client with provided name and options. + // If NewClient is set by the user, the user is responsible for handling maintnotifications upgrades and push notifications. NewClient func(opt *Options) *Client // The maximum number of retries before giving up. Command is retried @@ -62,12 +70,17 @@ type ClusterOptions struct { OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string - CredentialsProvider func() (username string, password string) - CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) - + Protocol int + Username string + Password string + CredentialsProvider func() (username string, password string) + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + StreamingCredentialsProvider auth.StreamingCredentialsProvider + + // MaxRetries is the maximum number of retries before giving up. + // For ClusterClient, retries are disabled by default (set to -1), + // because the cluster client handles all kinds of retries internally. + // This is intentional and differs from the standalone Options default. MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration @@ -86,6 +99,20 @@ type ClusterOptions struct { ConnMaxIdleTime time.Duration ConnMaxLifetime time.Duration + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + TLSConfig *tls.Config // DisableIndentity - Disable set-lib on connect. @@ -101,12 +128,33 @@ type ClusterOptions struct { DisableIdentity bool IdentitySuffix string // Add suffix to client name. Default is empty. + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. 
+ UnstableResp3 bool + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Default is 15 seconds. + FailingTimeoutSeconds int + + // MaintNotificationsConfig provides custom configuration for maintnotifications upgrades. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // cluster upgrade notifications gracefully and manage connection/pool state + // transitions seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications upgrades are in "auto" mode and will be enabled if the server supports it. + // The ClusterClient does not directly work with maintnotifications, it is up to the clients in the Nodes map to work with maintnotifications. + MaintNotificationsConfig *maintnotifications.Config } func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { + switch opt.MaxRedirects { + case -1: opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { + case 0: opt.MaxRedirects = 3 } @@ -117,6 +165,12 @@ func (opt *ClusterOptions) init() { if opt.PoolSize == 0 { opt.PoolSize = 5 * runtime.GOMAXPROCS(0) } + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } switch opt.ReadTimeout { case -1: @@ -150,6 +204,10 @@ func (opt *ClusterOptions) init() { if opt.NewClient == nil { opt.NewClient = NewClient } + + if opt.FailingTimeoutSeconds == 0 { + opt.FailingTimeoutSeconds = 15 + } } // ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis. 
@@ -254,6 +312,7 @@ func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, er o.PoolTimeout = q.duration("pool_timeout") o.ConnMaxLifetime = q.duration("conn_max_lifetime") o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + o.FailingTimeoutSeconds = q.int("failing_timeout_seconds") if q.err != nil { return nil, q.err @@ -279,16 +338,24 @@ func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, er } func (opt *ClusterOptions) clientOptions() *Options { + // Clone MaintNotificationsConfig to avoid sharing between cluster node clients + var maintNotificationsConfig *maintnotifications.Config + if opt.MaintNotificationsConfig != nil { + configClone := *opt.MaintNotificationsConfig + maintNotificationsConfig = &configClone + } + return &Options{ ClientName: opt.ClientName, Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, - CredentialsProvider: opt.CredentialsProvider, - CredentialsProviderContext: opt.CredentialsProviderContext, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, @@ -299,24 +366,30 @@ func (opt *ClusterOptions) clientOptions() *Options { WriteTimeout: opt.WriteTimeout, ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, - DisableIdentity: opt.DisableIdentity, - DisableIndentity: opt.DisableIdentity, - IdentitySuffix: opt.IdentitySuffix, - TLSConfig: opt.TLSConfig, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + DisableIdentity: opt.DisableIdentity, + DisableIndentity: opt.DisableIdentity, + IdentitySuffix: opt.IdentitySuffix, + FailingTimeoutSeconds: opt.FailingTimeoutSeconds, + TLSConfig: opt.TLSConfig, // If ClusterSlots is populated, then we probably have an artificial // cluster whose nodes are not in clustering mode (otherwise there isn't // much use for ClusterSlots config). This means we cannot execute the // READONLY command against that node -- setting readOnly to false in such // situations in the options below will prevent that from happening. 
- readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + UnstableResp3: opt.UnstableResp3, + MaintNotificationsConfig: maintNotificationsConfig, + PushNotificationProcessor: opt.PushNotificationProcessor, } } @@ -328,6 +401,10 @@ type clusterNode struct { latency uint32 // atomic generation uint32 // atomic failing uint32 // atomic + loaded uint32 // atomic + + // last time the latency measurement was performed for the node, stored in nanoseconds from epoch + lastLatencyMeasurement int64 // atomic } func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { @@ -380,6 +457,7 @@ func (n *clusterNode) updateLatency() { latency = float64(dur) / float64(successes) } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) + n.SetLastLatencyMeasurement(time.Now()) } func (n *clusterNode) Latency() time.Duration { @@ -389,10 +467,11 @@ func (n *clusterNode) Latency() time.Duration { func (n *clusterNode) MarkAsFailing() { atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) + atomic.StoreUint32(&n.loaded, 0) } func (n *clusterNode) Failing() bool { - const timeout = 15 // 15 seconds + timeout := int64(n.Client.opt.FailingTimeoutSeconds) failing := atomic.LoadUint32(&n.failing) if failing == 0 { @@ -409,6 +488,10 @@ func (n *clusterNode) Generation() uint32 { return atomic.LoadUint32(&n.generation) } +func (n *clusterNode) LastLatencyMeasurement() int64 { + return atomic.LoadInt64(&n.lastLatencyMeasurement) +} + func (n *clusterNode) SetGeneration(gen uint32) { for { v := atomic.LoadUint32(&n.generation) @@ -418,6 +501,33 @@ func (n *clusterNode) SetGeneration(gen uint32) { } } +func (n *clusterNode) SetLastLatencyMeasurement(t time.Time) { + for { + v := atomic.LoadInt64(&n.lastLatencyMeasurement) + if t.UnixNano() < v || atomic.CompareAndSwapInt64(&n.lastLatencyMeasurement, v, t.UnixNano()) { + break + } + } +} + +func (n *clusterNode) Loading() bool { + loaded := atomic.LoadUint32(&n.loaded) + if loaded == 1 { + return false + } + + // check if the node is loading + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + err := n.Client.Ping(ctx).Err() + loading := err != nil && isLoadingError(err) + if !loading { + atomic.StoreUint32(&n.loaded, 1) + } + return loading +} + //------------------------------------------------------------------------------ type clusterNodes struct { @@ -430,13 +540,12 @@ type clusterNodes struct { closed bool onNewNode []func(rdb *Client) - _generation uint32 // atomic + generation uint32 // atomic } func newClusterNodes(opt *ClusterOptions) *clusterNodes { return &clusterNodes{ - opt: opt, - + opt: opt, addrs: opt.Addrs, nodes: make(map[string]*clusterNode), } @@ -477,9 +586,11 @@ func (c *clusterNodes) Addrs() ([]string, error) { closed := c.closed //nolint:ifshort if !closed { if len(c.activeAddrs) > 0 { - addrs = c.activeAddrs + addrs = make([]string, len(c.activeAddrs)) + copy(addrs, c.activeAddrs) } else { - addrs = c.addrs + addrs = make([]string, len(c.addrs)) + copy(addrs, c.addrs) } } c.mu.RUnlock() @@ -494,21 +605,21 @@ func (c *clusterNodes) Addrs() ([]string, error) { } func (c *clusterNodes) NextGeneration() uint32 { - return atomic.AddUint32(&c._generation, 1) + return atomic.AddUint32(&c.generation, 1) } // GC removes unused nodes. 
func (c *clusterNodes) GC(generation uint32) { - //nolint:prealloc var collected []*clusterNode c.mu.Lock() c.activeAddrs = c.activeAddrs[:0] + now := time.Now() for addr, node := range c.nodes { if node.Generation() >= generation { c.activeAddrs = append(c.activeAddrs, addr) - if c.opt.RouteByLatency { + if c.opt.RouteByLatency && node.LastLatencyMeasurement() < now.Add(-minLatencyMeasurementInterval).UnixNano() { go node.updateLatency() } continue @@ -551,23 +662,20 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { fn(node.Client) } - c.addrs = appendIfNotExists(c.addrs, addr) + c.addrs = appendIfNotExist(c.addrs, addr) c.nodes[addr] = node return node, nil } func (c *clusterNodes) get(addr string) (*clusterNode, error) { - var node *clusterNode - var err error c.mu.RLock() + defer c.mu.RUnlock() + if c.closed { - err = pool.ErrClosed - } else { - node = c.nodes[addr] + return nil, pool.ErrClosed } - c.mu.RUnlock() - return node, err + return c.nodes[addr], nil } func (c *clusterNodes) All() ([]*clusterNode, error) { @@ -598,8 +706,9 @@ func (c *clusterNodes) Random() (*clusterNode, error) { //------------------------------------------------------------------------------ type clusterSlot struct { - start, end int - nodes []*clusterNode + start int + end int + nodes []*clusterNode } type clusterSlotSlice []*clusterSlot @@ -659,9 +768,9 @@ func newClusterState( nodes = append(nodes, node) if i == 0 { - c.Masters = appendUniqueNode(c.Masters, node) + c.Masters = appendIfNotExist(c.Masters, node) } else { - c.Slaves = appendUniqueNode(c.Slaves, node) + c.Slaves = appendIfNotExist(c.Slaves, node) } } @@ -700,12 +809,25 @@ func replaceLoopbackHost(nodeAddr, originHost string) string { return net.JoinHostPort(originHost, nodePort) } +// isLoopback returns true if the host is a loopback address. +// For IP addresses, it uses net.IP.IsLoopback(). +// For hostnames, it recognizes well-known loopback hostnames like "localhost" +// and Docker-specific loopback patterns like "*.docker.internal". func isLoopback(host string) bool { ip := net.ParseIP(host) - if ip == nil { + if ip != nil { + return ip.IsLoopback() + } + + if strings.ToLower(host) == "localhost" { + return true + } + + if strings.HasSuffix(strings.ToLower(host), ".docker.internal") { return true } - return ip.IsLoopback() + + return false } func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { @@ -724,7 +846,8 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { case 1: return nodes[0], nil case 2: - if slave := nodes[1]; !slave.Failing() { + slave := nodes[1] + if !slave.Failing() && !slave.Loading() { return slave, nil } return nodes[0], nil @@ -733,7 +856,7 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { for i := 0; i < 10; i++ { n := rand.Intn(len(nodes)-1) + 1 slave = nodes[n] - if !slave.Failing() { + if !slave.Failing() && !slave.Loading() { return slave, nil } } @@ -894,6 +1017,9 @@ type ClusterClient struct { // NewClusterClient returns a Redis Cluster client as described in // http://redis.io/topics/cluster-spec. func NewClusterClient(opt *ClusterOptions) *ClusterClient { + if opt == nil { + panic("redis: NewClusterClient nil options") + } opt.init() c := &ClusterClient{ @@ -934,13 +1060,6 @@ func (c *ClusterClient) Close() error { return c.nodes.Close() } -// Do create a Cmd from the args and processes the cmd. -func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) 
- _ = c.Process(ctx, cmd) - return cmd -} - func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -948,7 +1067,7 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd, -1) var node *clusterNode var moved bool var ask bool @@ -1223,7 +1342,7 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { continue } - return newClusterState(c.nodes, slots, node.Client.opt.Addr) + return newClusterState(c.nodes, slots, addr) } /* @@ -1294,9 +1413,13 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd return err } + preferredRandomSlot := -1 if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd, preferredRandomSlot) + if preferredRandomSlot == -1 { + preferredRandomSlot = slot + } node, err := c.slotReadOnlyNode(state, slot) if err != nil { return err @@ -1307,7 +1430,10 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd } for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd, preferredRandomSlot) + if preferredRandomSlot == -1 { + preferredRandomSlot = slot + } node, err := state.slotMasterNode(slot) if err != nil { return err @@ -1333,7 +1459,9 @@ func (c *ClusterClient) processPipelineNode( _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { - node.MarkAsFailing() + if !isContextError(err) { + node.MarkAsFailing() + } _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err @@ -1457,58 +1585,88 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) err // Trim multi .. exec. cmds = cmds[1 : len(cmds)-1] + if len(cmds) == 0 { + return nil + } + state, err := c.state.Get(ctx) if err != nil { setCmdsErr(cmds, err) return err } - cmdsMap := c.mapCmdsBySlot(ctx, cmds) - for slot, cmds := range cmdsMap { - node, err := state.slotMasterNode(slot) - if err != nil { - setCmdsErr(cmds, err) - continue + keyedCmdsBySlot := c.slottedKeyedCommands(cmds) + slot := -1 + switch len(keyedCmdsBySlot) { + case 0: + slot = hashtag.RandomSlot() + case 1: + for sl := range keyedCmdsBySlot { + slot = sl + break } + default: + // TxPipeline does not support cross slot transaction. 
+ setCmdsErr(cmds, ErrCrossSlot) + return ErrCrossSlot + } - cmdsMap := map[*clusterNode][]Cmder{node: cmds} - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err - } + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err } + } - failedCmds := newCmdsMap() - var wg sync.WaitGroup + failedCmds := newCmdsMap() + var wg sync.WaitGroup - for node, cmds := range cmdsMap { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - c.processTxPipelineNode(ctx, node, cmds, failedCmds) - }(node, cmds) - } + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + c.processTxPipelineNode(ctx, node, cmds, failedCmds) + }(node, cmds) + } - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds.m + wg.Wait() + if len(failedCmds.m) == 0 { + break } + cmdsMap = failedCmds.m } return cmdsFirstErr(cmds) } -func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { - cmdsMap := make(map[int][]Cmder) +// slottedKeyedCommands returns a map of slot to commands taking into account +// only commands that have keys. +func (c *ClusterClient) slottedKeyedCommands(cmds []Cmder) map[int][]Cmder { + cmdsSlots := map[int][]Cmder{} + + preferredRandomSlot := -1 for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) - cmdsMap[slot] = append(cmdsMap[slot], cmd) + if cmdFirstKeyPos(cmd) == 0 { + continue + } + + slot := c.cmdSlot(cmd, preferredRandomSlot) + if preferredRandomSlot == -1 { + preferredRandomSlot = slot + } + + cmdsSlots[slot] = append(cmdsSlots[slot], cmd) } - return cmdsMap + + return cmdsSlots } func (c *ClusterClient) processTxPipelineNode( @@ -1552,7 +1710,7 @@ func (c *ClusterClient) processTxPipelineNodeConn( trimmedCmds := cmds[1 : len(cmds)-1] if err := c.txPipelineReadQueued( - ctx, rd, statusCmd, trimmedCmds, failedCmds, + ctx, node, cn, rd, statusCmd, trimmedCmds, failedCmds, ); err != nil { setCmdsErr(cmds, err) @@ -1564,30 +1722,56 @@ func (c *ClusterClient) processTxPipelineNodeConn( return err } - return pipelineReadCmds(rd, trimmedCmds) + return node.Client.pipelineReadCmds(ctx, cn, rd, trimmedCmds) }) } func (c *ClusterClient) txPipelineReadQueued( ctx context.Context, + node *clusterNode, + cn *pool.Conn, rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap, ) error { // Parse queued replies. 
+ // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } if err := statusCmd.readReply(rd); err != nil { return err } for _, cmd := range cmds { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } err := statusCmd.readReply(rd) - if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { - continue + if err != nil { + if c.checkMovedErr(ctx, cmd, err, failedCmds) { + // will be processed later + continue + } + cmd.SetErr(err) + if !isRedisError(err) { + return err + } } - return err } + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse number of replies. line, err := rd.ReadLine() if err != nil { @@ -1693,38 +1877,64 @@ func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...s return err } +// maintenance notifications won't work here for now func (c *ClusterClient) pubSub() *PubSub { var node *clusterNode pubsub := &PubSub{ opt: c.opt.clientOptions(), - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { if node != nil { panic("node != nil") } var err error + if len(channels) > 0 { slot := hashtag.Slot(channels[0]) - node, err = c.slotMasterNode(ctx, slot) + + // newConn in PubSub is only used for subscription connections, so it is safe to + // assume that a slave node can always be used when client options specify ReadOnly. 
+ if c.opt.ReadOnly { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + node, err = c.slotReadOnlyNode(state, slot) + if err != nil { + return nil, err + } + } else { + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + } } else { node, err = c.nodes.Random() + if err != nil { + return nil, err + } } + cn, err := node.Client.pubSubPool.NewConn(ctx, node.Client.opt.Network, node.Client.opt.Addr, channels) if err != nil { + node = nil return nil, err } - - cn, err := node.Client.newConn(context.TODO()) + // will return nil if already initialized + err = node.Client.initConn(ctx, cn) if err != nil { + _ = cn.Close() node = nil - return nil, err } - + node.Client.pubSubPool.TrackConn(cn) return cn, nil }, closeConn: func(cn *pool.Conn) error { - err := node.Client.connPool.CloseConn(cn) + // Untrack connection from PubSubPool + node.Client.pubSubPool.UntrackConn(cn) + err := cn.Close() node = nil return err }, @@ -1823,17 +2033,20 @@ func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { return info } -func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { +func (c *ClusterClient) cmdSlot(cmd Cmder, preferredRandomSlot int) int { args := cmd.Args() - if args[0] == "cluster" && args[1] == "getkeysinslot" { + if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") { return args[2].(int) } - return cmdSlot(cmd, cmdFirstKeyPos(cmd)) + return cmdSlot(cmd, cmdFirstKeyPos(cmd), preferredRandomSlot) } -func cmdSlot(cmd Cmder, pos int) int { +func cmdSlot(cmd Cmder, pos int, preferredRandomSlot int) int { if pos == 0 { + if preferredRandomSlot != -1 { + return preferredRandomSlot + } return hashtag.RandomSlot() } firstKey := cmd.stringArg(pos) @@ -1903,7 +2116,7 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, if err != nil { return nil, err } - return node.Client, err + return node.Client, nil } func (c *ClusterClient) context(ctx context.Context) context.Context { @@ -1913,26 +2126,13 @@ func (c *ClusterClient) context(ctx context.Context) context.Context { return context.Background() } -func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { - for _, n := range nodes { - if n == node { - return nodes - } - } - return append(nodes, node) -} - -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } +func appendIfNotExist[T comparable](vals []T, newVal T) []T { + for _, v := range vals { + if v == newVal { + return vals } - ss = append(ss, e) } - return ss + return append(vals, newVal) } //------------------------------------------------------------------------------ diff --git a/vendor/github.com/redis/go-redis/v9/pipeline.go b/vendor/github.com/redis/go-redis/v9/pipeline.go index 1c114205c..567bf121a 100644 --- a/vendor/github.com/redis/go-redis/v9/pipeline.go +++ b/vendor/github.com/redis/go-redis/v9/pipeline.go @@ -7,7 +7,7 @@ import ( type pipelineExecer func(context.Context, []Cmder) error -// Pipeliner is an mechanism to realise Redis Pipeline technique. +// Pipeliner is a mechanism to realise Redis Pipeline technique. 
// // Pipelining is a technique to extremely speed up processing by packing // operations to batches, send them at once to Redis and read a replies in a @@ -23,21 +23,27 @@ type pipelineExecer func(context.Context, []Cmder) error type Pipeliner interface { StatefulCmdable - // Len is to obtain the number of commands in the pipeline that have not yet been executed. + // Len obtains the number of commands in the pipeline that have not yet been executed. Len() int // Do is an API for executing any command. // If a certain Redis command is not yet supported, you can use Do to execute it. Do(ctx context.Context, args ...interface{}) *Cmd - // Process is to put the commands to be executed into the pipeline buffer. + // Process queues the cmd for later execution. Process(ctx context.Context, cmd Cmder) error - // Discard is to discard all commands in the cache that have not yet been executed. + // BatchProcess adds multiple commands to be executed into the pipeline buffer. + BatchProcess(ctx context.Context, cmd ...Cmder) error + + // Discard discards all commands in the pipeline buffer that have not yet been executed. Discard() - // Exec is to send all the commands buffered in the pipeline to the redis-server. + // Exec sends all the commands buffered in the pipeline to the redis server. Exec(ctx context.Context) ([]Cmder, error) + + // Cmds returns the list of queued commands. + Cmds() []Cmder } var _ Pipeliner = (*Pipeline)(nil) @@ -76,7 +82,12 @@ func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd { // Process queues the cmd for later execution. func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error { - c.cmds = append(c.cmds, cmd) + return c.BatchProcess(ctx, cmd) +} + +// BatchProcess queues multiple cmds for later execution. +func (c *Pipeline) BatchProcess(ctx context.Context, cmd ...Cmder) error { + c.cmds = append(c.cmds, cmd...) 
return nil } @@ -119,3 +130,7 @@ func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([ func (c *Pipeline) TxPipeline() Pipeliner { return c } + +func (c *Pipeline) Cmds() []Cmder { + return c.cmds +} diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go index 5d5cd1a62..c26e7cadb 100644 --- a/vendor/github.com/redis/go-redis/v9/probabilistic.go +++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go @@ -319,37 +319,69 @@ func (cmd *BFInfoCmd) Result() (BFInfo, error) { } func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + result := BFInfo{} + + // Create a mapping from key names to pointers of struct fields + respMapping := map[string]*int64{ + "Capacity": &result.Capacity, + "CAPACITY": &result.Capacity, + "Size": &result.Size, + "SIZE": &result.Size, + "Number of filters": &result.Filters, + "FILTERS": &result.Filters, + "Number of items inserted": &result.ItemsInserted, + "ITEMS": &result.ItemsInserted, + "Expansion rate": &result.ExpansionRate, + "EXPANSION": &result.ExpansionRate, + } + + // Helper function to read and assign a value based on the key + readAndAssignValue := func(key string) error { + fieldPtr, exists := respMapping[key] + if !exists { + return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + } + + // Read the integer and assign to the field via pointer dereferencing + val, err := rd.ReadInt() + if err != nil { + return err + } + *fieldPtr = val + return nil + } + + readType, err := rd.PeekReplyType() if err != nil { return err } - var key string - var result BFInfo - for f := 0; f < n; f++ { - key, err = rd.ReadString() + if len(cmd.args) > 2 && readType == proto.RespArray { + n, err := rd.ReadArrayLen() if err != nil { return err } - - switch key { - case "Capacity": - result.Capacity, err = rd.ReadInt() - case "Size": - result.Size, err = rd.ReadInt() - case "Number of filters": - result.Filters, err = rd.ReadInt() - case "Number of items inserted": - result.ItemsInserted, err = rd.ReadInt() - case "Expansion rate": - result.ExpansionRate, err = rd.ReadInt() - default: - return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + if key, ok := cmd.args[2].(string); ok && n == 1 { + if err := readAndAssignValue(key); err != nil { + return err + } + } else { + return fmt.Errorf("redis: BLOOM.INFO invalid argument key type") } - + } else { + n, err := rd.ReadMapLen() if err != nil { return err } + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + if err := readAndAssignValue(key); err != nil { + return err + } + } } cmd.val = result @@ -1084,18 +1116,14 @@ func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIn // Returns OK on success or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.add/ func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.ADD" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewStatusCmd(ctx, args...) 
_ = c(ctx, cmd) return cmd @@ -1106,18 +1134,14 @@ func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64 // Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.byrank/ func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(rank)) + args := make([]interface{}, 2+len(rank)) args[0] = "TDIGEST.BYRANK" args[1] = key - // Convert uint slice to []interface{} - interfaceSlice := make([]interface{}, len(rank)) - for i, v := range rank { - interfaceSlice[i] = v + for i, r := range rank { + args[2+i] = r } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1128,18 +1152,14 @@ func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) // Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.byrevrank/ func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(rank)) + args := make([]interface{}, 2+len(rank)) args[0] = "TDIGEST.BYREVRANK" args[1] = key - // Convert uint slice to []interface{} - interfaceSlice := make([]interface{}, len(rank)) - for i, v := range rank { - interfaceSlice[i] = v + for i, r := range rank { + args[2+i] = r } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1150,18 +1170,14 @@ func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint6 // Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.cdf/ func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.CDF" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1344,18 +1360,14 @@ func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd { // Returns an array of floats representing the quantile values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.quantile/ func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.QUANTILE" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1366,18 +1378,14 @@ func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...fl // Returns an array of integers representing the rank values for each element or an error if the operation could not be completed. 
// For more information - https://redis.io/commands/tdigest.rank/ func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { - args := make([]interface{}, 2, 2+len(values)) + args := make([]interface{}, 2+len(values)) args[0] = "TDIGEST.RANK" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(values)) for i, v := range values { - interfaceSlice[i] = v + args[i+2] = v } - args = append(args, interfaceSlice...) - cmd := NewIntSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1399,18 +1407,14 @@ func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd { // Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.revrank/ func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { - args := make([]interface{}, 2, 2+len(values)) + args := make([]interface{}, 2+len(values)) args[0] = "TDIGEST.REVRANK" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(values)) for i, v := range values { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewIntSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go index 72b18f49a..959a5c45b 100644 --- a/vendor/github.com/redis/go-redis/v9/pubsub.go +++ b/vendor/github.com/redis/go-redis/v9/pubsub.go @@ -10,6 +10,7 @@ import ( "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/push" ) // PubSub implements Pub/Sub commands as described in @@ -21,7 +22,7 @@ import ( type PubSub struct { opt *Options - newConn func(ctx context.Context, channels []string) (*pool.Conn, error) + newConn func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) closeConn func(*pool.Conn) error mu sync.Mutex @@ -38,6 +39,12 @@ type PubSub struct { chOnce sync.Once msgCh *channel allCh *channel + + // Push notification processor for handling generic push notifications + pushProcessor push.NotificationProcessor + + // Cleanup callback for maintenanceNotifications upgrade tracking + onClose func() } func (c *PubSub) init() { @@ -45,6 +52,9 @@ func (c *PubSub) init() { } func (c *PubSub) String() string { + c.mu.Lock() + defer c.mu.Unlock() + channels := mapKeys(c.channels) channels = append(channels, mapKeys(c.patterns)...) channels = append(channels, mapKeys(c.schannels)...) @@ -66,10 +76,18 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er return c.cn, nil } + if c.opt.Addr == "" { + // TODO(maintenanceNotifications): + // this is probably cluster client + // c.newConn will ignore the addr argument + // will be changed when we have maintenanceNotifications upgrades for cluster clients + c.opt.Addr = internal.RedisNull + } + channels := mapKeys(c.channels) channels = append(channels, newChannels...) 
- cn, err := c.newConn(ctx, channels) + cn, err := c.newConn(ctx, c.opt.Addr, channels) if err != nil { return nil, err } @@ -150,12 +168,31 @@ func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allo if c.cn != cn { return } + + if !cn.IsUsable() || cn.ShouldHandoff() { + c.reconnect(ctx, fmt.Errorf("pubsub: connection is not usable")) + } + if isBadConn(err, allowTimeout, c.opt.Addr) { c.reconnect(ctx, err) } } func (c *PubSub) reconnect(ctx context.Context, reason error) { + if c.cn != nil && c.cn.ShouldHandoff() { + newEndpoint := c.cn.GetHandoffEndpoint() + // If new endpoint is NULL, use the original address + if newEndpoint == internal.RedisNull { + newEndpoint = c.opt.Addr + } + + if newEndpoint != "" { + // Update the address in the options + oldAddr := c.cn.RemoteAddr().String() + c.opt.Addr = newEndpoint + internal.Logger.Printf(ctx, "pubsub: reconnecting to new endpoint %s (was %s)", newEndpoint, oldAddr) + } + } _ = c.closeTheCn(reason) _, _ = c.conn(ctx, nil) } @@ -164,9 +201,6 @@ func (c *PubSub) closeTheCn(reason error) error { if c.cn == nil { return nil } - if !c.closed { - internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason) - } err := c.closeConn(c.cn) c.cn = nil return err @@ -182,6 +216,11 @@ func (c *PubSub) Close() error { c.closed = true close(c.exit) + // Call cleanup callback if set + if c.onClose != nil { + c.onClose() + } + return c.closeTheCn(pool.ErrClosed) } @@ -426,16 +465,20 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int } // Don't hold the lock to allow subscriptions and pings. - cn, err := c.connWithLock(ctx) if err != nil { return nil, err } - err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error { + err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: conn[%d] error processing pending notifications before reading reply: %v", cn.GetID(), err) + } return c.cmd.readReply(rd) }) - c.releaseConnWithLock(ctx, cn, err, timeout > 0) if err != nil { @@ -448,6 +491,12 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int // Receive returns a message as a Subscription, Message, Pong or error. // See PubSub example for details. This is low-level API and in most cases // Channel should be used instead. +// Receive returns a message as a Subscription, Message, Pong, or an error. +// See PubSub example for details. This is a low-level API and in most cases +// Channel should be used instead. +// This method blocks until a message is received or an error occurs. +// It may return early with an error if the context is canceled, the connection fails, +// or other internal errors occur. 
func (c *PubSub) Receive(ctx context.Context) (interface{}, error) { return c.ReceiveTimeout(ctx, 0) } @@ -529,6 +578,27 @@ func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interfac return c.allCh.allCh } +func (c *PubSub) processPendingPushNotificationWithReader(ctx context.Context, cn *pool.Conn, rd *proto.Reader) error { + // Only process push notifications for RESP3 connections with a processor + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) +} + +func (c *PubSub) pushNotificationHandlerContext(cn *pool.Conn) push.NotificationHandlerContext { + // PubSub doesn't have a client or connection pool, so we pass nil for those + // PubSub connections are blocking + return push.NotificationHandlerContext{ + PubSub: c, + Conn: cn, + IsBlocking: true, + } +} + type ChannelOption func(c *channel) // WithChannelSize specifies the Go chan size that is used to buffer incoming messages. diff --git a/vendor/github.com/redis/go-redis/v9/push/errors.go b/vendor/github.com/redis/go-redis/v9/push/errors.go new file mode 100644 index 000000000..c10c98aa8 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/errors.go @@ -0,0 +1,176 @@ +package push + +import ( + "errors" + "fmt" +) + +// Push notification error definitions +// This file contains all error types and messages used by the push notification system + +// Error reason constants +const ( + // HandlerReasons + ReasonHandlerNil = "handler cannot be nil" + ReasonHandlerExists = "cannot overwrite existing handler" + ReasonHandlerProtected = "handler is protected" + + // ProcessorReasons + ReasonPushNotificationsDisabled = "push notifications are disabled" +) + +// ProcessorType represents the type of processor involved in the error +// defined as a custom type for better readability and easier maintenance +type ProcessorType string + +const ( + // ProcessorTypes + ProcessorTypeProcessor = ProcessorType("processor") + ProcessorTypeVoidProcessor = ProcessorType("void_processor") + ProcessorTypeCustom = ProcessorType("custom") +) + +// ProcessorOperation represents the operation being performed by the processor +// defined as a custom type for better readability and easier maintenance +type ProcessorOperation string + +const ( + // ProcessorOperations + ProcessorOperationProcess = ProcessorOperation("process") + ProcessorOperationRegister = ProcessorOperation("register") + ProcessorOperationUnregister = ProcessorOperation("unregister") + ProcessorOperationUnknown = ProcessorOperation("unknown") +) + +// Common error variables for reuse +var ( + // ErrHandlerNil is returned when attempting to register a nil handler + ErrHandlerNil = errors.New(ReasonHandlerNil) +) + +// Registry errors + +// ErrHandlerExists creates an error for when attempting to overwrite an existing handler +func ErrHandlerExists(pushNotificationName string) error { + return NewHandlerError(ProcessorOperationRegister, pushNotificationName, ReasonHandlerExists, nil) +} + +// ErrProtectedHandler creates an error for when attempting to unregister a protected handler +func ErrProtectedHandler(pushNotificationName string) error { + return NewHandlerError(ProcessorOperationUnregister, pushNotificationName, ReasonHandlerProtected, nil) +} + +// VoidProcessor errors + +// ErrVoidProcessorRegister creates an error for when 
attempting to register a handler on void processor +func ErrVoidProcessorRegister(pushNotificationName string) error { + return NewProcessorError(ProcessorTypeVoidProcessor, ProcessorOperationRegister, pushNotificationName, ReasonPushNotificationsDisabled, nil) +} + +// ErrVoidProcessorUnregister creates an error for when attempting to unregister a handler on void processor +func ErrVoidProcessorUnregister(pushNotificationName string) error { + return NewProcessorError(ProcessorTypeVoidProcessor, ProcessorOperationUnregister, pushNotificationName, ReasonPushNotificationsDisabled, nil) +} + +// Error type definitions for advanced error handling + +// HandlerError represents errors related to handler operations +type HandlerError struct { + Operation ProcessorOperation + PushNotificationName string + Reason string + Err error +} + +func (e *HandlerError) Error() string { + if e.Err != nil { + return fmt.Sprintf("handler %s failed for '%s': %s (%v)", e.Operation, e.PushNotificationName, e.Reason, e.Err) + } + return fmt.Sprintf("handler %s failed for '%s': %s", e.Operation, e.PushNotificationName, e.Reason) +} + +func (e *HandlerError) Unwrap() error { + return e.Err +} + +// NewHandlerError creates a new HandlerError +func NewHandlerError(operation ProcessorOperation, pushNotificationName, reason string, err error) *HandlerError { + return &HandlerError{ + Operation: operation, + PushNotificationName: pushNotificationName, + Reason: reason, + Err: err, + } +} + +// ProcessorError represents errors related to processor operations +type ProcessorError struct { + ProcessorType ProcessorType // "processor", "void_processor" + Operation ProcessorOperation // "process", "register", "unregister" + PushNotificationName string // Name of the push notification involved + Reason string + Err error +} + +func (e *ProcessorError) Error() string { + notifInfo := "" + if e.PushNotificationName != "" { + notifInfo = fmt.Sprintf(" for '%s'", e.PushNotificationName) + } + if e.Err != nil { + return fmt.Sprintf("%s %s failed%s: %s (%v)", e.ProcessorType, e.Operation, notifInfo, e.Reason, e.Err) + } + return fmt.Sprintf("%s %s failed%s: %s", e.ProcessorType, e.Operation, notifInfo, e.Reason) +} + +func (e *ProcessorError) Unwrap() error { + return e.Err +} + +// NewProcessorError creates a new ProcessorError +func NewProcessorError(processorType ProcessorType, operation ProcessorOperation, pushNotificationName, reason string, err error) *ProcessorError { + return &ProcessorError{ + ProcessorType: processorType, + Operation: operation, + PushNotificationName: pushNotificationName, + Reason: reason, + Err: err, + } +} + +// Helper functions for common error scenarios + +// IsHandlerNilError checks if an error is due to a nil handler +func IsHandlerNilError(err error) bool { + return errors.Is(err, ErrHandlerNil) +} + +// IsHandlerExistsError checks if an error is due to attempting to overwrite an existing handler. +// This function works correctly even when the error is wrapped. +func IsHandlerExistsError(err error) bool { + var handlerErr *HandlerError + if errors.As(err, &handlerErr) { + return handlerErr.Operation == ProcessorOperationRegister && handlerErr.Reason == ReasonHandlerExists + } + return false +} + +// IsProtectedHandlerError checks if an error is due to attempting to unregister a protected handler. +// This function works correctly even when the error is wrapped. 
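A short sketch of how these wrap-aware helpers behave in practice; the notification name "MOVING" is purely illustrative here:

package main

import (
	"fmt"

	"github.com/redis/go-redis/v9/push"
)

func main() {
	// The helpers use errors.As internally, so classification survives wrapping.
	err := fmt.Errorf("subscribe failed: %w", push.ErrHandlerExists("MOVING"))

	fmt.Println(push.IsHandlerExistsError(err))    // true
	fmt.Println(push.IsProtectedHandlerError(err)) // false
}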
+func IsProtectedHandlerError(err error) bool { + var handlerErr *HandlerError + if errors.As(err, &handlerErr) { + return handlerErr.Operation == ProcessorOperationUnregister && handlerErr.Reason == ReasonHandlerProtected + } + return false +} + +// IsVoidProcessorError checks if an error is due to void processor operations. +// This function works correctly even when the error is wrapped. +func IsVoidProcessorError(err error) bool { + var procErr *ProcessorError + if errors.As(err, &procErr) { + return procErr.ProcessorType == ProcessorTypeVoidProcessor && procErr.Reason == ReasonPushNotificationsDisabled + } + return false +} diff --git a/vendor/github.com/redis/go-redis/v9/push/handler.go b/vendor/github.com/redis/go-redis/v9/push/handler.go new file mode 100644 index 000000000..815edce37 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/handler.go @@ -0,0 +1,14 @@ +package push + +import ( + "context" +) + +// NotificationHandler defines the interface for push notification handlers. +type NotificationHandler interface { + // HandlePushNotification processes a push notification with context information. + // The handlerCtx provides information about the client, connection pool, and connection + // on which the notification was received, allowing handlers to make informed decisions. + // Returns an error if the notification could not be handled. + HandlePushNotification(ctx context.Context, handlerCtx NotificationHandlerContext, notification []interface{}) error +} diff --git a/vendor/github.com/redis/go-redis/v9/push/handler_context.go b/vendor/github.com/redis/go-redis/v9/push/handler_context.go new file mode 100644 index 000000000..c39e186b0 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/handler_context.go @@ -0,0 +1,44 @@ +package push + +// No imports needed for this file + +// NotificationHandlerContext provides context information about where a push notification was received. +// This struct allows handlers to make informed decisions based on the source of the notification +// with strongly typed access to different client types using concrete types. +type NotificationHandlerContext struct { + // Client is the Redis client instance that received the notification. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *redis.baseClient + // - *redis.Client + // - *redis.ClusterClient + // - *redis.Conn + Client interface{} + + // ConnPool is the connection pool from which the connection was obtained. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *pool.ConnPool + // - *pool.SingleConnPool + // - *pool.StickyConnPool + ConnPool interface{} + + // PubSub is the PubSub instance that received the notification. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *redis.PubSub + PubSub interface{} + + // Conn is the specific connection on which the notification was received. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. 
+ // It can be one of the following types: + // - *pool.Conn + Conn interface{} + + // IsBlocking indicates if the notification was received on a blocking connection. + IsBlocking bool +} diff --git a/vendor/github.com/redis/go-redis/v9/push/processor.go b/vendor/github.com/redis/go-redis/v9/push/processor.go new file mode 100644 index 000000000..b8112ddc8 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/processor.go @@ -0,0 +1,203 @@ +package push + +import ( + "context" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" +) + +// NotificationProcessor defines the interface for push notification processors. +type NotificationProcessor interface { + // GetHandler returns the handler for a specific push notification name. + GetHandler(pushNotificationName string) NotificationHandler + // ProcessPendingNotifications checks for and processes any pending push notifications. + // To be used when it is known that there are notifications on the socket. + // It will try to read from the socket and if it is empty - it may block. + ProcessPendingNotifications(ctx context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error + // RegisterHandler registers a handler for a specific push notification name. + RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error + // UnregisterHandler removes a handler for a specific push notification name. + UnregisterHandler(pushNotificationName string) error +} + +// Processor handles push notifications with a registry of handlers +type Processor struct { + registry *Registry +} + +// NewProcessor creates a new push notification processor +func NewProcessor() *Processor { + return &Processor{ + registry: NewRegistry(), + } +} + +// GetHandler returns the handler for a specific push notification name +func (p *Processor) GetHandler(pushNotificationName string) NotificationHandler { + return p.registry.GetHandler(pushNotificationName) +} + +// RegisterHandler registers a handler for a specific push notification name +func (p *Processor) RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error { + return p.registry.RegisterHandler(pushNotificationName, handler, protected) +} + +// UnregisterHandler removes a handler for a specific push notification name +func (p *Processor) UnregisterHandler(pushNotificationName string) error { + return p.registry.UnregisterHandler(pushNotificationName) +} + +// ProcessPendingNotifications checks for and processes any pending push notifications +// This method should be called by the client in WithReader before reading the reply +// It will try to read from the socket and if it is empty - it may block. 
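For illustration, a minimal NotificationHandler implementation registered on a Processor; the loggingHandler type and the "MY_EVENT" name are invented for this sketch:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9/push"
)

// loggingHandler prints every notification it receives.
type loggingHandler struct{}

func (loggingHandler) HandlePushNotification(
	ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{},
) error {
	fmt.Println("push notification:", notification)
	return nil
}

func main() {
	p := push.NewProcessor()

	// protected=false means the handler can later be removed with UnregisterHandler.
	if err := p.RegisterHandler("MY_EVENT", loggingHandler{}, false); err != nil {
		fmt.Println("register failed:", err)
	}
}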
+func (p *Processor) ProcessPendingNotifications(ctx context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error {
+	if rd == nil {
+		return nil
+	}
+
+	for {
+		// Check if there's data available to read
+		replyType, err := rd.PeekReplyType()
+		if err != nil {
+			// No more data available or error reading;
+			// if it is a timeout, it will be handled by the caller
+			break
+		}
+
+		// Only process push notifications (arrays starting with >)
+		if replyType != proto.RespPush {
+			break
+		}
+
+		// See if we should skip this notification
+		notificationName, err := rd.PeekPushNotificationName()
+		if err != nil {
+			break
+		}
+
+		if willHandleNotificationInClient(notificationName) {
+			break
+		}
+
+		// Read the push notification
+		reply, err := rd.ReadReply()
+		if err != nil {
+			internal.Logger.Printf(ctx, "push: error reading push notification: %v", err)
+			break
+		}
+
+		// Convert to slice of interfaces
+		notification, ok := reply.([]interface{})
+		if !ok {
+			break
+		}
+
+		// Handle the notification directly
+		if len(notification) > 0 {
+			// Extract the notification type (first element)
+			if notificationType, ok := notification[0].(string); ok {
+				// Get the handler for this notification type
+				if handler := p.registry.GetHandler(notificationType); handler != nil {
+					// Handle the notification
+					err := handler.HandlePushNotification(ctx, handlerCtx, notification)
+					if err != nil {
+						internal.Logger.Printf(ctx, "push: error handling push notification: %v", err)
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// VoidProcessor discards all push notifications without processing them
+type VoidProcessor struct{}
+
+// NewVoidProcessor creates a new void push notification processor
+func NewVoidProcessor() *VoidProcessor {
+	return &VoidProcessor{}
+}
+
+// GetHandler returns nil for void processor since it doesn't maintain handlers
+func (v *VoidProcessor) GetHandler(_ string) NotificationHandler {
+	return nil
+}
+
+// RegisterHandler returns an error for void processor since it doesn't maintain handlers
+func (v *VoidProcessor) RegisterHandler(pushNotificationName string, _ NotificationHandler, _ bool) error {
+	return ErrVoidProcessorRegister(pushNotificationName)
+}
+
+// UnregisterHandler returns an error for void processor since it doesn't maintain handlers
+func (v *VoidProcessor) UnregisterHandler(pushNotificationName string) error {
+	return ErrVoidProcessorUnregister(pushNotificationName)
+}
+
+// ProcessPendingNotifications for VoidProcessor maintains no handler registry, since push
+// notifications are only available in RESP3 and this processor is used for RESP2
+// connections; this avoids unnecessary buffer scanning overhead. It does, however,
+// read and discard any buffered push notifications so that they are not
+// interpreted as a reply.
+// This method should be called by the client in WithReader before reading the reply
+// to be sure there are no buffered push notifications.
+// It will try to read from the socket; if the socket is empty, it may block.
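A small sketch of the void processor's contract, using the IsVoidProcessorError helper from errors.go; "MY_EVENT" is again an arbitrary name:

package main

import (
	"fmt"

	"github.com/redis/go-redis/v9/push"
)

func main() {
	v := push.NewVoidProcessor()

	// Registration is always rejected because the void processor keeps no handlers.
	err := v.RegisterHandler("MY_EVENT", nil, false)
	fmt.Println(push.IsVoidProcessorError(err)) // true
}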
+func (v *VoidProcessor) ProcessPendingNotifications(_ context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error { + // read and discard all push notifications + if rd == nil { + return nil + } + + for { + // Check if there's data available to read + replyType, err := rd.PeekReplyType() + if err != nil { + // No more data available or error reading + // if timeout, it will be handled by the caller + break + } + + // Only process push notifications (arrays starting with >) + if replyType != proto.RespPush { + break + } + // see if we should skip this notification + notificationName, err := rd.PeekPushNotificationName() + if err != nil { + break + } + + if willHandleNotificationInClient(notificationName) { + break + } + + // Read the push notification + _, err = rd.ReadReply() + if err != nil { + internal.Logger.Printf(context.Background(), "push: error reading push notification: %v", err) + return nil + } + } + return nil +} + +// willHandleNotificationInClient checks if a notification type should be ignored by the push notification +// processor and handled by other specialized systems instead (pub/sub, streams, keyspace, etc.). +func willHandleNotificationInClient(notificationType string) bool { + switch notificationType { + // Pub/Sub notifications - handled by pub/sub system + case "message", // Regular pub/sub message + "pmessage", // Pattern pub/sub message + "subscribe", // Subscription confirmation + "unsubscribe", // Unsubscription confirmation + "psubscribe", // Pattern subscription confirmation + "punsubscribe", // Pattern unsubscription confirmation + "smessage", // Sharded pub/sub message (Redis 7.0+) + "ssubscribe", // Sharded subscription confirmation + "sunsubscribe": // Sharded unsubscription confirmation + return true + default: + return false + } +} diff --git a/vendor/github.com/redis/go-redis/v9/push/push.go b/vendor/github.com/redis/go-redis/v9/push/push.go new file mode 100644 index 000000000..e6adeaa45 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/push.go @@ -0,0 +1,7 @@ +// Package push provides push notifications for Redis. +// This is an EXPERIMENTAL API for handling push notifications from Redis. +// It is not yet stable and may change in the future. +// Although this is in a public package, in its current form public use is not advised. +// Pending push notifications should be processed before executing any readReply from the connection +// as per RESP3 specification push notifications can be sent at any time. 
+package push diff --git a/vendor/github.com/redis/go-redis/v9/push/registry.go b/vendor/github.com/redis/go-redis/v9/push/registry.go new file mode 100644 index 000000000..a265ae92f --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/registry.go @@ -0,0 +1,61 @@ +package push + +import ( + "sync" +) + +// Registry manages push notification handlers +type Registry struct { + mu sync.RWMutex + handlers map[string]NotificationHandler + protected map[string]bool +} + +// NewRegistry creates a new push notification registry +func NewRegistry() *Registry { + return &Registry{ + handlers: make(map[string]NotificationHandler), + protected: make(map[string]bool), + } +} + +// RegisterHandler registers a handler for a specific push notification name +func (r *Registry) RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error { + if handler == nil { + return ErrHandlerNil + } + + r.mu.Lock() + defer r.mu.Unlock() + + // Check if handler already exists + if _, exists := r.protected[pushNotificationName]; exists { + return ErrHandlerExists(pushNotificationName) + } + + r.handlers[pushNotificationName] = handler + r.protected[pushNotificationName] = protected + return nil +} + +// GetHandler returns the handler for a specific push notification name +func (r *Registry) GetHandler(pushNotificationName string) NotificationHandler { + r.mu.RLock() + defer r.mu.RUnlock() + return r.handlers[pushNotificationName] +} + +// UnregisterHandler removes a handler for a specific push notification name +func (r *Registry) UnregisterHandler(pushNotificationName string) error { + r.mu.Lock() + defer r.mu.Unlock() + + // Check if handler is protected + if protected, exists := r.protected[pushNotificationName]; exists && protected { + return ErrProtectedHandler(pushNotificationName) + } + + delete(r.handlers, pushNotificationName) + delete(r.protected, pushNotificationName) + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/push_notifications.go b/vendor/github.com/redis/go-redis/v9/push_notifications.go new file mode 100644 index 000000000..572955fec --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push_notifications.go @@ -0,0 +1,21 @@ +package redis + +import ( + "github.com/redis/go-redis/v9/push" +) + +// NewPushNotificationProcessor creates a new push notification processor +// This processor maintains a registry of handlers and processes push notifications +// It is used for RESP3 connections where push notifications are available +func NewPushNotificationProcessor() push.NotificationProcessor { + return push.NewProcessor() +} + +// NewVoidPushNotificationProcessor creates a new void push notification processor +// This processor does not maintain any handlers and always returns nil for all operations +// It is used for RESP2 connections where push notifications are not available +// It can also be used to disable push notifications for RESP3 connections, where +// it will discard all push notifications without processing them +func NewVoidPushNotificationProcessor() push.NotificationProcessor { + return push.NewVoidProcessor() +} diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go index a3bc78604..a6a710677 100644 --- a/vendor/github.com/redis/go-redis/v9/redis.go +++ b/vendor/github.com/redis/go-redis/v9/redis.go @@ -9,10 +9,14 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" + 
"github.com/redis/go-redis/v9/internal/auth/streaming" "github.com/redis/go-redis/v9/internal/hscan" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) // Scanner internal/hscan.Scanner exposed interface. @@ -22,10 +26,16 @@ type Scanner = hscan.Scanner const Nil = proto.Nil // SetLogger set custom log +// Use with VoidLogger to disable logging. func SetLogger(logger internal.Logging) { internal.Logger = logger } +// SetLogLevel sets the log level for the library. +func SetLogLevel(logLevel internal.LogLevelT) { + internal.LogLevel = logLevel +} + //------------------------------------------------------------------------------ type Hook interface { @@ -41,7 +51,7 @@ type ( ) type hooksMixin struct { - hooksMu *sync.Mutex + hooksMu *sync.RWMutex slice []Hook initial hooks @@ -49,7 +59,7 @@ type hooksMixin struct { } func (hs *hooksMixin) initHooks(hooks hooks) { - hs.hooksMu = new(sync.Mutex) + hs.hooksMu = new(sync.RWMutex) hs.initial = hooks hs.chain() } @@ -151,7 +161,7 @@ func (hs *hooksMixin) clone() hooksMixin { clone := *hs l := len(clone.slice) clone.slice = clone.slice[:l:l] - clone.hooksMu = new(sync.Mutex) + clone.hooksMu = new(sync.RWMutex) return clone } @@ -176,9 +186,14 @@ func (hs *hooksMixin) withProcessPipelineHook( } func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) { - hs.hooksMu.Lock() - defer hs.hooksMu.Unlock() - return hs.current.dial(ctx, network, addr) + // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...) + // while this dialer is concurrently accessed by the background connection pool population + // routine when MinIdleConns > 0. 
+ hs.hooksMu.RLock() + current := hs.current + hs.hooksMu.RUnlock() + + return current.dial(ctx, network, addr) } func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error { @@ -196,15 +211,39 @@ func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) e //------------------------------------------------------------------------------ type baseClient struct { - opt *Options - connPool pool.Pooler + opt *Options + optLock sync.RWMutex + connPool pool.Pooler + pubSubPool *pool.PubSubPool + hooksMixin onClose func() error // hook called when client is closed + + // Push notification processing + pushProcessor push.NotificationProcessor + + // Maintenance notifications manager + maintNotificationsManager *maintnotifications.Manager + maintNotificationsManagerLock sync.RWMutex + + // streamingCredentialsManager is used to manage streaming credentials + streamingCredentialsManager *streaming.Manager } func (c *baseClient) clone() *baseClient { - clone := *c - return &clone + c.maintNotificationsManagerLock.RLock() + maintNotificationsManager := c.maintNotificationsManager + c.maintNotificationsManagerLock.RUnlock() + + clone := &baseClient{ + opt: c.opt, + connPool: c.connPool, + onClose: c.onClose, + pushProcessor: c.pushProcessor, + maintNotificationsManager: maintNotificationsManager, + streamingCredentialsManager: c.streamingCredentialsManager, + } + return clone } func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { @@ -222,21 +261,6 @@ func (c *baseClient) String() string { return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) } -func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { - cn, err := c.connPool.NewConn(ctx) - if err != nil { - return nil, err - } - - err = c.initConn(ctx, cn) - if err != nil { - _ = c.connPool.CloseConn(cn) - return nil, err - } - - return cn, nil -} - func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { if c.opt.Limiter != nil { err := c.opt.Limiter.Allow() @@ -262,7 +286,7 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { return nil, err } - if cn.Inited { + if cn.IsInited() { return cn, nil } @@ -274,40 +298,199 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { return nil, err } + // initConn will transition to IDLE state, so we need to acquire it + // before returning it to the user. + if !cn.TryAcquire() { + return nil, fmt.Errorf("redis: connection is not usable") + } + return cn, nil } +func (c *baseClient) reAuthConnection() func(poolCn *pool.Conn, credentials auth.Credentials) error { + return func(poolCn *pool.Conn, credentials auth.Credentials) error { + var err error + username, password := credentials.BasicAuth() + + // Use background context - timeout is handled by ReadTimeout in WithReader/WithWriter + ctx := context.Background() + + connPool := pool.NewSingleConnPool(c.connPool, poolCn) + + // Pass hooks so that reauth commands are recorded/traced + cn := newConn(c.opt, connPool, &c.hooksMixin) + + if username != "" { + err = cn.AuthACL(ctx, username, password).Err() + } else { + err = cn.Auth(ctx, password).Err() + } + + return err + } +} +func (c *baseClient) onAuthenticationErr() func(poolCn *pool.Conn, err error) { + return func(poolCn *pool.Conn, err error) { + if err != nil { + if isBadConn(err, false, c.opt.Addr) { + // Close the connection to force a reconnection. 
+ err := c.connPool.CloseConn(poolCn) + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close connection: %v", err) + // try to close the network connection directly + // so that no resource is leaked + err := poolCn.Close() + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close network connection: %v", err) + } + } + } + internal.Logger.Printf(context.Background(), "redis: re-authentication failed: %v", err) + } + } +} + +func (c *baseClient) wrappedOnClose(newOnClose func() error) func() error { + onClose := c.onClose + return func() error { + var firstErr error + err := newOnClose() + // Even if we have an error we would like to execute the onClose hook + // if it exists. We will return the first error that occurred. + // This is to keep error handling consistent with the rest of the code. + if err != nil { + firstErr = err + } + if onClose != nil { + err = onClose() + if err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr + } +} + func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { - if cn.Inited { + // This function is called in two scenarios: + // 1. First-time init: Connection is in CREATED state (from pool.Get()) + // - We need to transition CREATED → INITIALIZING and do the initialization + // - If another goroutine is already initializing, we WAIT for it to finish + // 2. Re-initialization: Connection is in INITIALIZING state (from SetNetConnAndInitConn()) + // - We're already in INITIALIZING, so just proceed with initialization + + currentState := cn.GetStateMachine().GetState() + + // Fast path: Check if already initialized (IDLE or IN_USE) + if currentState == pool.StateIdle || currentState == pool.StateInUse { return nil } - cn.Inited = true - var err error - username, password := c.opt.Username, c.opt.Password - if c.opt.CredentialsProviderContext != nil { - if username, password, err = c.opt.CredentialsProviderContext(ctx); err != nil { + // If in CREATED state, try to transition to INITIALIZING + if currentState == pool.StateCreated { + finalState, err := cn.GetStateMachine().TryTransition([]pool.ConnState{pool.StateCreated}, pool.StateInitializing) + if err != nil { + // Another goroutine is initializing or connection is in unexpected state + // Check what state we're in now + if finalState == pool.StateIdle || finalState == pool.StateInUse { + // Already initialized by another goroutine + return nil + } + + if finalState == pool.StateInitializing { + // Another goroutine is initializing - WAIT for it to complete + // Use a context with timeout = min(remaining command timeout, DialTimeout) + // This prevents waiting too long while respecting the caller's deadline + var waitCtx context.Context + var cancel context.CancelFunc + dialTimeout := c.opt.DialTimeout + + if cmdDeadline, hasCmdDeadline := ctx.Deadline(); hasCmdDeadline { + // Calculate remaining time until command deadline + remainingTime := time.Until(cmdDeadline) + // Use the minimum of remaining time and DialTimeout + if remainingTime < dialTimeout { + // Command deadline is sooner, use it + waitCtx = ctx + } else { + // DialTimeout is shorter, cap the wait at DialTimeout + waitCtx, cancel = context.WithTimeout(ctx, dialTimeout) + } + } else { + // No command deadline, use DialTimeout to prevent waiting indefinitely + waitCtx, cancel = context.WithTimeout(ctx, dialTimeout) + } + if cancel != nil { + defer cancel() + } + + finalState, err := cn.GetStateMachine().AwaitAndTransition( + waitCtx, + 
[]pool.ConnState{pool.StateIdle, pool.StateInUse}, + pool.StateIdle, // Target is IDLE (but we're already there, so this is a no-op) + ) + if err != nil { + return err + } + // Verify we're now initialized + if finalState == pool.StateIdle || finalState == pool.StateInUse { + return nil + } + // Unexpected state after waiting + return fmt.Errorf("connection in unexpected state after initialization: %s", finalState) + } + + // Unexpected state (CLOSED, UNUSABLE, etc.) return err } - } else if c.opt.CredentialsProvider != nil { - username, password = c.opt.CredentialsProvider() } + // At this point, we're in INITIALIZING state and we own the initialization + // If we fail, we must transition to CLOSED + var initErr error connPool := pool.NewSingleConnPool(c.connPool, cn) - conn := newConn(c.opt, connPool) + conn := newConn(c.opt, connPool, &c.hooksMixin) + + username, password := "", "" + if c.opt.StreamingCredentialsProvider != nil { + credListener, initErr := c.streamingCredentialsManager.Listener( + cn, + c.reAuthConnection(), + c.onAuthenticationErr(), + ) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to create credentials listener: %w", initErr) + } - var auth bool - protocol := c.opt.Protocol - // By default, use RESP3 in current version. - if protocol < 2 { - protocol = 3 + credentials, unsubscribeFromCredentialsProvider, initErr := c.opt.StreamingCredentialsProvider. + Subscribe(credListener) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to subscribe to streaming credentials: %w", initErr) + } + + c.onClose = c.wrappedOnClose(unsubscribeFromCredentialsProvider) + cn.SetOnClose(unsubscribeFromCredentialsProvider) + + username, password = credentials.BasicAuth() + } else if c.opt.CredentialsProviderContext != nil { + username, password, initErr = c.opt.CredentialsProviderContext(ctx) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to get credentials from context provider: %w", initErr) + } + } else if c.opt.CredentialsProvider != nil { + username, password = c.opt.CredentialsProvider() + } else if c.opt.Username != "" || c.opt.Password != "" { + username, password = c.opt.Username, c.opt.Password } // for redis-server versions that do not support the HELLO command, // RESP2 will continue to be used. - if err = conn.Hello(ctx, protocol, username, password, "").Err(); err == nil { - auth = true - } else if !isRedisError(err) { + if initErr = conn.Hello(ctx, c.opt.Protocol, username, password, c.opt.ClientName).Err(); initErr == nil { + // Authentication successful with HELLO command + } else if !isRedisError(initErr) { // When the server responds with the RESP protocol and the result is not a normal // execution result of the HELLO command, we consider it to be an indication that // the server does not support the HELLO command. @@ -315,18 +498,22 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { // or it could be DragonflyDB or a third-party redis-proxy. They all respond // with different error string results for unsupported commands, making it // difficult to rely on error strings to determine all results. 
- return err - } - - _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error { - if !auth && password != "" { - if username != "" { - pipe.AuthACL(ctx, username, password) - } else { - pipe.Auth(ctx, password) - } + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr + } else if password != "" { + // Try legacy AUTH command if HELLO failed + if username != "" { + initErr = conn.AuthACL(ctx, username, password).Err() + } else { + initErr = conn.Auth(ctx, password).Err() + } + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to authenticate: %w", initErr) } + } + _, initErr = conn.Pipelined(ctx, func(pipe Pipeliner) error { if c.opt.DB > 0 { pipe.Select(ctx, c.opt.DB) } @@ -341,8 +528,58 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { return nil }) - if err != nil { - return err + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to initialize connection options: %w", initErr) + } + + // Enable maintnotifications if maintnotifications are configured + c.optLock.RLock() + maintNotifEnabled := c.opt.MaintNotificationsConfig != nil && c.opt.MaintNotificationsConfig.Mode != maintnotifications.ModeDisabled + protocol := c.opt.Protocol + endpointType := c.opt.MaintNotificationsConfig.EndpointType + c.optLock.RUnlock() + var maintNotifHandshakeErr error + if maintNotifEnabled && protocol == 3 { + maintNotifHandshakeErr = conn.ClientMaintNotifications( + ctx, + true, + endpointType.String(), + ).Err() + if maintNotifHandshakeErr != nil { + if !isRedisError(maintNotifHandshakeErr) { + // if not redis error, fail the connection + cn.GetStateMachine().Transition(pool.StateClosed) + return maintNotifHandshakeErr + } + c.optLock.Lock() + // handshake failed - check and modify config atomically + switch c.opt.MaintNotificationsConfig.Mode { + case maintnotifications.ModeEnabled: + // enabled mode, fail the connection + c.optLock.Unlock() + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to enable maintnotifications: %w", maintNotifHandshakeErr) + default: // will handle auto and any other + // Disabling logging here as it's too noisy. + // TODO: Enable when we have a better logging solution for log levels + // internal.Logger.Printf(ctx, "auto mode fallback: maintnotifications disabled due to handshake error: %v", maintNotifHandshakeErr) + c.opt.MaintNotificationsConfig.Mode = maintnotifications.ModeDisabled + c.optLock.Unlock() + // auto mode, disable maintnotifications and continue + if initErr := c.disableMaintNotificationsUpgrades(); initErr != nil { + // Log error but continue - auto mode should be resilient + internal.Logger.Printf(ctx, "failed to disable maintnotifications in auto mode: %v", initErr) + } + } + } else { + // handshake was executed successfully + // to make sure that the handshake will be executed on other connections as well if it was successfully + // executed on this connection, we will force the handshake to be executed on all connections + c.optLock.Lock() + c.opt.MaintNotificationsConfig.Mode = maintnotifications.ModeEnabled + c.optLock.Unlock() + } } if !c.opt.DisableIdentity && !c.opt.DisableIndentity { @@ -356,14 +593,33 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { p.ClientSetInfo(ctx, WithLibraryVersion(libVer)) // Handle network errors (e.g. timeouts) in CLIENT SETINFO to avoid // out of order responses later on. 
- if _, err = p.Exec(ctx); err != nil && !isRedisError(err) { - return err + if _, initErr = p.Exec(ctx); initErr != nil && !isRedisError(initErr) { + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr } } + // Set the connection initialization function for potential reconnections + // This must be set before transitioning to IDLE so that handoff/reauth can use it + cn.SetInitConnFunc(c.createInitConnFunc()) + + // Initialization succeeded - transition to IDLE state + // This marks the connection as initialized and ready for use + // NOTE: The connection is still owned by the calling goroutine at this point + // and won't be available to other goroutines until it's Put() back into the pool + cn.GetStateMachine().Transition(pool.StateIdle) + + // Call OnConnect hook if configured + // The connection is in IDLE state but still owned by this goroutine + // If OnConnect needs to send commands, it can use the connection safely if c.opt.OnConnect != nil { - return c.opt.OnConnect(ctx, conn) + if initErr = c.opt.OnConnect(ctx, conn); initErr != nil { + // OnConnect failed - transition to closed + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr + } } + return nil } @@ -375,6 +631,10 @@ func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) if isBadConn(err, false, c.opt.Addr) { c.connPool.Remove(ctx, cn, err) } else { + // process any pending push notifications before returning the connection to the pool + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before releasing connection: %v", err) + } c.connPool.Put(ctx, cn) } } @@ -416,6 +676,19 @@ func (c *baseClient) process(ctx context.Context, cmd Cmder) error { return lastErr } +func (c *baseClient) assertUnstableCommand(cmd Cmder) (bool, error) { + switch cmd.(type) { + case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd: + if c.opt.UnstableResp3 { + return true, nil + } else { + return false, fmt.Errorf("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3. See the README and the release notes for guidance") + } + default: + return false, nil + } +} + func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { @@ -425,14 +698,35 @@ func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool retryTimeout := uint32(0) if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + // Process any pending push notifications before executing the command + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before command: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }); err != nil { atomic.StoreUint32(&retryTimeout, 1) return err } - - if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil { + readReplyFunc := cmd.readReply + // Apply unstable RESP3 search module. 
+ if c.opt.Protocol != 2 { + useRawReply, err := c.assertUnstableCommand(cmd) + if err != nil { + return err + } + if useRawReply { + readReplyFunc = cmd.readRawReply + } + } + if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), func(rd *proto.Reader) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } + return readReplyFunc(rd) + }); err != nil { if cmd.readTimeout() == nil { atomic.StoreUint32(&retryTimeout, 1) } else { @@ -465,19 +759,86 @@ func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { return c.opt.ReadTimeout } +// context returns the context for the current connection. +// If the context timeout is enabled, it returns the original context. +// Otherwise, it returns a new background context. +func (c *baseClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + +// createInitConnFunc creates a connection initialization function that can be used for reconnections. +func (c *baseClient) createInitConnFunc() func(context.Context, *pool.Conn) error { + return func(ctx context.Context, cn *pool.Conn) error { + return c.initConn(ctx, cn) + } +} + +// enableMaintNotificationsUpgrades initializes the maintnotifications upgrade manager and pool hook. +// This function is called during client initialization. +// will register push notification handlers for all maintenance upgrade events. +// will start background workers for handoff processing in the pool hook. +func (c *baseClient) enableMaintNotificationsUpgrades() error { + // Create client adapter + clientAdapterInstance := newClientAdapter(c) + + // Create maintnotifications manager directly + manager, err := maintnotifications.NewManager(clientAdapterInstance, c.connPool, c.opt.MaintNotificationsConfig) + if err != nil { + return err + } + // Set the manager reference and initialize pool hook + c.maintNotificationsManagerLock.Lock() + c.maintNotificationsManager = manager + c.maintNotificationsManagerLock.Unlock() + + // Initialize pool hook (safe to call without lock since manager is now set) + manager.InitPoolHook(c.dialHook) + return nil +} + +func (c *baseClient) disableMaintNotificationsUpgrades() error { + c.maintNotificationsManagerLock.Lock() + defer c.maintNotificationsManagerLock.Unlock() + + // Close the maintnotifications manager + if c.maintNotificationsManager != nil { + // Closing the manager will also shutdown the pool hook + // and remove it from the pool + c.maintNotificationsManager.Close() + c.maintNotificationsManager = nil + } + return nil +} + // Close closes the client, releasing any open resources. // // It is rare to Close a Client, as the Client is meant to be // long-lived and shared between many goroutines. 
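A minimal lifecycle sketch matching this guidance; the address is a placeholder:

package main

import "github.com/redis/go-redis/v9"

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close() // releases pooled and pub/sub connections on shutdown

	// ... share rdb across goroutines for the lifetime of the process ...
}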
func (c *baseClient) Close() error { var firstErr error + + // Close maintnotifications manager first + if err := c.disableMaintNotificationsUpgrades(); err != nil { + firstErr = err + } + if c.onClose != nil { - if err := c.onClose(); err != nil { + if err := c.onClose(); err != nil && firstErr == nil { firstErr = err } } - if err := c.connPool.Close(); err != nil && firstErr == nil { - firstErr = err + if c.connPool != nil { + if err := c.connPool.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + if c.pubSubPool != nil { + if err := c.pubSubPool.Close(); err != nil && firstErr == nil { + firstErr = err + } } return firstErr } @@ -517,11 +878,19 @@ func (c *baseClient) generalProcessPipeline( // Enable retries by default to retry dial errors returned by withConn. canRetry := true lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + // Process any pending push notifications before executing the pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before processing pipeline: %v", err) + } var err error canRetry, err = p(ctx, cn, cmds) return err }) if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { + // The error should be set here only when failing to obtain the conn. + if !isRedisError(lastErr) { + setCmdsErr(cmds, lastErr) + } return lastErr } } @@ -531,6 +900,11 @@ func (c *baseClient) generalProcessPipeline( func (c *baseClient) pipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { + // Process any pending push notifications before executing the pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before writing pipeline: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { @@ -539,7 +913,8 @@ func (c *baseClient) pipelineProcessCmds( } if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { - return pipelineReadCmds(rd, cmds) + // read all replies + return c.pipelineReadCmds(ctx, cn, rd, cmds) }); err != nil { return true, err } @@ -547,8 +922,12 @@ func (c *baseClient) pipelineProcessCmds( return false, nil } -func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { +func (c *baseClient) pipelineReadCmds(ctx context.Context, cn *pool.Conn, rd *proto.Reader, cmds []Cmder) error { for i, cmd := range cmds { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } err := cmd.readReply(rd) cmd.SetErr(err) if err != nil && !isRedisError(err) { @@ -563,6 +942,11 @@ func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { func (c *baseClient) txPipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { + // Process any pending push notifications before executing the transaction pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before transaction: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { @@ -575,12 +959,13 @@ func 
(c *baseClient) txPipelineProcessCmds( // Trim multi and exec. trimmedCmds := cmds[1 : len(cmds)-1] - if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil { + if err := c.txPipelineReadQueued(ctx, cn, rd, statusCmd, trimmedCmds); err != nil { setCmdsErr(cmds, err) return err } - return pipelineReadCmds(rd, trimmedCmds) + // Read replies. + return c.pipelineReadCmds(ctx, cn, rd, trimmedCmds) }); err != nil { return false, err } @@ -588,19 +973,36 @@ func (c *baseClient) txPipelineProcessCmds( return false, nil } -func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { +// txPipelineReadQueued reads queued replies from the Redis server. +// It returns an error if the server returns an error or if the number of replies does not match the number of commands. +func (c *baseClient) txPipelineReadQueued(ctx context.Context, cn *pool.Conn, rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse +OK. if err := statusCmd.readReply(rd); err != nil { return err } // Parse +QUEUED. - for range cmds { - if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { - return err + for _, cmd := range cmds { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } + if err := statusCmd.readReply(rd); err != nil { + cmd.SetErr(err) + if !isRedisError(err) { + return err + } } } + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse number of replies. line, err := rd.ReadLine() if err != nil { @@ -617,13 +1019,6 @@ func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) return nil } -func (c *baseClient) context(ctx context.Context) context.Context { - if c.opt.ContextTimeoutEnabled { - return ctx - } - return context.Background() -} - //------------------------------------------------------------------------------ // Client is a Redis client representing a pool of zero or more underlying connections. @@ -634,20 +1029,68 @@ func (c *baseClient) context(ctx context.Context) context.Context { type Client struct { *baseClient cmdable - hooksMixin } // NewClient returns a client to the Redis Server specified by Options. 
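A hedged construction sketch for the maintenance-notifications path; it assumes the config type is maintnotifications.Config, carrying the Mode field used throughout this diff:

package main

import (
	"github.com/redis/go-redis/v9"
	"github.com/redis/go-redis/v9/maintnotifications"
)

func main() {
	// ModeEnabled makes a failed handshake fatal (NewClient panics, per the
	// design note in NewClient below); auto mode instead falls back to
	// disabled and logs. RESP3 (Protocol: 3) is required either way.
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Protocol: 3,
		MaintNotificationsConfig: &maintnotifications.Config{ // type name assumed
			Mode: maintnotifications.ModeEnabled,
		},
	})
	defer rdb.Close()
}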
func NewClient(opt *Options) *Client { + if opt == nil { + panic("redis: NewClient nil options") + } + // clone to not share options with the caller + opt = opt.clone() opt.init() + // Push notifications are always enabled for RESP3 (cannot be disabled) + c := Client{ baseClient: &baseClient{ opt: opt, }, } c.init() - c.connPool = newConnPool(opt, c.dialHook) + + // Initialize push notification processor using shared helper + // Use void processor for RESP2 connections (push notifications not available) + c.pushProcessor = initializePushProcessor(opt) + // set opt push processor for child clients + c.opt.PushNotificationProcessor = c.pushProcessor + + // Create connection pools + var err error + c.connPool, err = newConnPool(opt, c.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + c.pubSubPool, err = newPubSubPool(opt, c.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } + + if opt.StreamingCredentialsProvider != nil { + c.streamingCredentialsManager = streaming.NewManager(c.connPool, c.opt.PoolTimeout) + c.connPool.AddPoolHook(c.streamingCredentialsManager.PoolHook()) + } + + // Initialize maintnotifications first if enabled and protocol is RESP3 + if opt.MaintNotificationsConfig != nil && opt.MaintNotificationsConfig.Mode != maintnotifications.ModeDisabled && opt.Protocol == 3 { + err := c.enableMaintNotificationsUpgrades() + if err != nil { + internal.Logger.Printf(context.Background(), "failed to initialize maintnotifications: %v", err) + if opt.MaintNotificationsConfig.Mode == maintnotifications.ModeEnabled { + /* + Design decision: panic here to fail fast if maintnotifications cannot be enabled when explicitly requested. + We choose to panic instead of returning an error to avoid breaking the existing client API, which does not expect + an error from NewClient. This ensures that misconfiguration or critical initialization failures are surfaced + immediately, rather than allowing the client to continue in a partially initialized or inconsistent state. + Clients relying on maintnotifications should be aware that initialization errors will cause a panic, and should + handle this accordingly (e.g., via recover or by validating configuration before calling NewClient). + This approach is only used when MaintNotificationsConfig.Mode is MaintNotificationsEnabled, indicating that maintnotifications + upgrades are required for correct operation. In other modes, initialization failures are logged but do not panic. + */ + panic(fmt.Errorf("failed to enable maintnotifications: %w", err)) + } + } + } return &c } @@ -670,14 +1113,7 @@ func (c *Client) WithTimeout(timeout time.Duration) *Client { } func (c *Client) Conn() *Conn { - return newConn(c.opt, pool.NewStickyConnPool(c.connPool)) -} - -// Do create a Cmd from the args and processes the cmd. -func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd + return newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin) } func (c *Client) Process(ctx context.Context, cmd Cmder) error { @@ -691,11 +1127,51 @@ func (c *Client) Options() *Options { return c.opt } +// GetMaintNotificationsManager returns the maintnotifications manager instance for monitoring and control. +// Returns nil if maintnotifications are not enabled. 
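A minimal monitoring sketch using the accessor documented above:

package main

import (
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379", Protocol: 3})
	defer rdb.Close()

	// nil signals that maintenance notifications are disabled or unavailable.
	if mgr := rdb.GetMaintNotificationsManager(); mgr != nil {
		fmt.Println("maintenance notifications are active")
	}
}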
+func (c *Client) GetMaintNotificationsManager() *maintnotifications.Manager { + c.maintNotificationsManagerLock.RLock() + defer c.maintNotificationsManagerLock.RUnlock() + return c.maintNotificationsManager +} + +// initializePushProcessor initializes the push notification processor for any client type. +// This is a shared helper to avoid duplication across NewClient, NewFailoverClient, and NewSentinelClient. +func initializePushProcessor(opt *Options) push.NotificationProcessor { + // Always use custom processor if provided + if opt.PushNotificationProcessor != nil { + return opt.PushNotificationProcessor + } + + // Push notifications are always enabled for RESP3, disabled for RESP2 + if opt.Protocol == 3 { + // Create default processor for RESP3 connections + return NewPushNotificationProcessor() + } + + // Create void processor for RESP2 connections (push notifications not available) + return NewVoidPushNotificationProcessor() +} + +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. +func (c *Client) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + +// GetPushNotificationHandler returns the handler for a specific push notification name. +// Returns nil if no handler is registered for the given name. +func (c *Client) GetPushNotificationHandler(pushNotificationName string) push.NotificationHandler { + return c.pushProcessor.GetHandler(pushNotificationName) +} + type PoolStats pool.Stats // PoolStats returns connection pool stats. func (c *Client) PoolStats() *PoolStats { stats := c.connPool.Stats() + stats.PubSubStats = *(c.pubSubPool.Stats()) return (*PoolStats)(stats) } @@ -730,13 +1206,31 @@ func (c *Client) TxPipeline() Pipeliner { func (c *Client) pubSub() *PubSub { pubsub := &PubSub{ opt: c.opt, - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - return c.newConn(ctx) + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { + cn, err := c.pubSubPool.NewConn(ctx, c.opt.Network, addr, channels) + if err != nil { + return nil, err + } + // will return nil if already initialized + err = c.initConn(ctx, cn) + if err != nil { + _ = cn.Close() + return nil, err + } + // Track connection in PubSubPool + c.pubSubPool.TrackConn(cn) + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + // Untrack connection from PubSubPool + c.pubSubPool.UntrackConn(cn) + _ = cn.Close() + return nil }, - closeConn: c.connPool.CloseConn, + pushProcessor: c.pushProcessor, } pubsub.init() + return pubsub } @@ -803,10 +1297,12 @@ type Conn struct { baseClient cmdable statefulCmdable - hooksMixin } -func newConn(opt *Options, connPool pool.Pooler) *Conn { +// newConn is a helper func to create a new Conn instance. +// the Conn instance is not thread-safe and should not be shared between goroutines. +// the parentHooks will be cloned, no need to clone before passing it. 
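A sketch of client-level registration through the RegisterPushNotificationHandler API added above; the auditHandler type and the "CUSTOM_EVENT" name are invented for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
	"github.com/redis/go-redis/v9/push"
)

// auditHandler logs raw server pushes.
type auditHandler struct{}

func (auditHandler) HandlePushNotification(
	ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{},
) error {
	fmt.Println("server push:", notification)
	return nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379", Protocol: 3})
	defer rdb.Close()

	// protected=true would make the registration permanent for the client's lifetime.
	if err := rdb.RegisterPushNotificationHandler("CUSTOM_EVENT", auditHandler{}, false); err != nil {
		fmt.Println("already registered:", err)
	}
}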
+func newConn(opt *Options, connPool pool.Pooler, parentHooks *hooksMixin) *Conn { c := Conn{ baseClient: baseClient{ opt: opt, @@ -814,6 +1310,14 @@ func newConn(opt *Options, connPool pool.Pooler) *Conn { }, } + if parentHooks != nil { + c.hooksMixin = parentHooks.clone() + } + + // Initialize push notification processor using shared helper + // Use void processor for RESP2 connections (push notifications not available) + c.pushProcessor = initializePushProcessor(opt) + c.cmdable = c.Process c.statefulCmdable = c.Process c.initHooks(hooks{ @@ -832,6 +1336,13 @@ func (c *Conn) Process(ctx context.Context, cmd Cmder) error { return err } +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. +func (c *Conn) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(ctx, fn) } @@ -859,3 +1370,78 @@ func (c *Conn) TxPipeline() Pipeliner { pipe.init() return &pipe } + +// processPushNotifications processes all pending push notifications on a connection +// This ensures that cluster topology changes are handled immediately before the connection is used +// This method should be called by the client before using WithReader for command execution +// +// Performance optimization: Skip the expensive MaybeHasData() syscall if a health check +// was performed recently (within 5 seconds). The health check already verified the connection +// is healthy and checked for unexpected data (push notifications). +func (c *baseClient) processPushNotifications(ctx context.Context, cn *pool.Conn) error { + // Only process push notifications for RESP3 connections with a processor + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Performance optimization: Skip MaybeHasData() syscall if health check was recent + // If the connection was health-checked within the last 5 seconds, we can skip the + // expensive syscall since the health check already verified no unexpected data. + // This is safe because: + // 0. lastHealthCheckNs is set in pool/conn.go:putConn() after a successful health check + // 1. Health check (connCheck) uses the same syscall (Recvfrom with MSG_PEEK) + // 2. If push notifications arrived, they would have been detected by health check + // 3. 5 seconds is short enough that connection state is still fresh + // 4. Push notifications will be processed by the next WithReader call + // used it is set on getConn, so we should use another timer (lastPutAt?) 
+ lastHealthCheckNs := cn.LastPutAtNs() + if lastHealthCheckNs > 0 { + // Use pool's cached time to avoid expensive time.Now() syscall + nowNs := pool.GetCachedTimeNs() + if nowNs-lastHealthCheckNs < int64(5*time.Second) { + // Recent health check confirmed no unexpected data, skip the syscall + return nil + } + } + + // Check if there is any data to read before processing + // This is an optimization on UNIX systems where MaybeHasData is a syscall + // On Windows, MaybeHasData always returns true, so this check is a no-op + if !cn.MaybeHasData() { + return nil + } + + // Use WithReader to access the reader and process push notifications + // This is critical for maintnotifications to work properly + // NOTE: almost no timeouts are set for this read, so it should not block + // longer than necessary, 10us should be plenty of time to read if there are any push notifications + // on the socket. + return cn.WithReader(ctx, 10*time.Microsecond, func(rd *proto.Reader) error { + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) + }) +} + +// processPendingPushNotificationWithReader processes all pending push notifications on a connection +// This method should be called by the client in WithReader before reading the reply +func (c *baseClient) processPendingPushNotificationWithReader(ctx context.Context, cn *pool.Conn, rd *proto.Reader) error { + // if we have the reader, we don't need to check for data on the socket, we are waiting + // for either a reply or a push notification, so we can block until we get a reply or reach the timeout + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) +} + +// pushNotificationHandlerContext creates a handler context for push notification processing +func (c *baseClient) pushNotificationHandlerContext(cn *pool.Conn) push.NotificationHandlerContext { + return push.NotificationHandlerContext{ + Client: c, + ConnPool: c.connPool, + Conn: cn, // Wrap in adapter for easier interface access + } +} diff --git a/vendor/github.com/redis/go-redis/v9/result.go b/vendor/github.com/redis/go-redis/v9/result.go index cfd4cf92e..3e0d0a134 100644 --- a/vendor/github.com/redis/go-redis/v9/result.go +++ b/vendor/github.com/redis/go-redis/v9/result.go @@ -82,6 +82,14 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { return &cmd } +// NewFloatSliceResult returns a FloatSliceCmd initialised with val and err for testing. +func NewFloatSliceResult(val []float64, err error) *FloatSliceCmd { + var cmd FloatSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + // NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing. 
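A small sketch of how result constructors such as the NewFloatSliceResult added above are typically used to stub command results in tests, without a live server:

package main

import (
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	cmd := redis.NewFloatSliceResult([]float64{1.5, 2.5}, nil)

	vals, err := cmd.Result()
	fmt.Println(vals, err) // [1.5 2.5] <nil>
}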
func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd { var cmd MapStringStringCmd diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go index fa76aaf0d..3381460ab 100644 --- a/vendor/github.com/redis/go-redis/v9/ring.go +++ b/vendor/github.com/redis/go-redis/v9/ring.go @@ -13,15 +13,23 @@ import ( "github.com/cespare/xxhash/v2" "github.com/dgryski/go-rendezvous" //nolint + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/rand" ) var errRingShardsDown = errors.New("redis: all ring shards are down") +// defaultHeartbeatFn is the default function used to check the shard liveness +var defaultHeartbeatFn = func(ctx context.Context, client *Client) bool { + err := client.Ping(ctx).Err() + return err == nil || err == pool.ErrPoolTimeout +} + //------------------------------------------------------------------------------ type ConsistentHash interface { @@ -54,10 +62,14 @@ type RingOptions struct { // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. ClientName string - // Frequency of PING commands sent to check shards availability. + // Frequency of executing HeartbeatFn to check shards availability. // Shard is considered down after 3 subsequent failed checks. HeartbeatFrequency time.Duration + // A function used to check the shard liveness + // if not set, defaults to defaultHeartbeatFn + HeartbeatFn func(ctx context.Context, client *Client) bool + // NewConsistentHash returns a consistent hash that is used // to distribute keys across the shards. // @@ -73,7 +85,24 @@ type RingOptions struct { Protocol int Username string Password string - DB int + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. + CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + DB int MaxRetries int MinRetryBackoff time.Duration @@ -95,6 +124,20 @@ type RingOptions struct { ConnMaxIdleTime time.Duration ConnMaxLifetime time.Duration + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. 
+ // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + TLSConfig *tls.Config Limiter Limiter @@ -110,6 +153,7 @@ type RingOptions struct { // default: false DisableIdentity bool IdentitySuffix string + UnstableResp3 bool } func (opt *RingOptions) init() { @@ -123,13 +167,18 @@ func (opt *RingOptions) init() { opt.HeartbeatFrequency = 500 * time.Millisecond } + if opt.HeartbeatFn == nil { + opt.HeartbeatFn = defaultHeartbeatFn + } + if opt.NewConsistentHash == nil { opt.NewConsistentHash = newRendezvous } - if opt.MaxRetries == -1 { + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -144,6 +193,13 @@ func (opt *RingOptions) init() { case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } + + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } } func (opt *RingOptions) clientOptions() *Options { @@ -152,10 +208,13 @@ func (opt *RingOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, - DB: opt.DB, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, + DB: opt.DB, MaxRetries: -1, @@ -172,13 +231,17 @@ func (opt *RingOptions) clientOptions() *Options { MaxActiveConns: opt.MaxActiveConns, ConnMaxIdleTime: opt.ConnMaxIdleTime, ConnMaxLifetime: opt.ConnMaxLifetime, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, TLSConfig: opt.TLSConfig, Limiter: opt.Limiter, DisableIdentity: opt.DisableIdentity, DisableIndentity: opt.DisableIndentity, - IdentitySuffix: opt.IdentitySuffix, + + IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, } } @@ -345,16 +408,16 @@ func (c *ringSharding) newRingShards( return } +// Warning: External exposure of `c.shards.list` may cause data races. +// So keep internal or implement deep copy if exposed. func (c *ringSharding) List() []*ringShard { - var list []*ringShard - c.mu.RLock() - if !c.closed { - list = c.shards.list - } - c.mu.RUnlock() + defer c.mu.RUnlock() - return list + if c.closed { + return nil + } + return c.shards.list } func (c *ringSharding) Hash(key string) string { @@ -401,7 +464,12 @@ func (c *ringSharding) GetByName(shardName string) (*ringShard, error) { c.mu.RLock() defer c.mu.RUnlock() - return c.shards.m[shardName], nil + shard, ok := c.shards.m[shardName] + if !ok { + return nil, errors.New("redis: the shard is not in the ring") + } + + return shard, nil } func (c *ringSharding) Random() (*ringShard, error) { @@ -418,9 +486,9 @@ func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) { case <-ticker.C: var rebalance bool + // note: `c.List()` return a shadow copy of `[]*ringShard`. 
for _, shard := range c.List() { - err := shard.Client.Ping(ctx).Err() - isUp := err == nil || err == pool.ErrPoolTimeout + isUp := c.opt.HeartbeatFn(ctx, shard.Client) if shard.Vote(isUp) { internal.Logger.Printf(ctx, "ring shard state changed: %s", shard) rebalance = true @@ -518,6 +586,9 @@ type Ring struct { } func NewRing(opt *RingOptions) *Ring { + if opt == nil { + panic("redis: NewRing nil options") + } opt.init() hbCtx, hbCancel := context.WithCancel(context.Background()) @@ -550,13 +621,6 @@ func (c *Ring) SetAddrs(addrs map[string]string) { c.sharding.SetAddrs(addrs) } -// Do create a Cmd from the args and processes the cmd. -func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd -} - func (c *Ring) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -574,6 +638,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration { // PoolStats returns accumulated connection pool stats. func (c *Ring) PoolStats() *PoolStats { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var acc PoolStats for _, shard := range shards { @@ -643,6 +708,7 @@ func (c *Ring) ForEachShard( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var wg sync.WaitGroup errCh := make(chan error, 1) @@ -674,6 +740,7 @@ func (c *Ring) ForEachShard( } func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var firstErr error for _, shard := range shards { @@ -691,7 +758,7 @@ func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { return nil, firstErr } -func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { +func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { pos := cmdFirstKeyPos(cmd) if pos == 0 { return c.sharding.Random() @@ -709,7 +776,7 @@ func (c *Ring) process(ctx context.Context, cmd Cmder) error { } } - shard, err := c.cmdShard(ctx, cmd) + shard, err := c.cmdShard(cmd) if err != nil { return err } @@ -768,6 +835,8 @@ func (c *Ring) generalProcessPipeline( } var wg sync.WaitGroup + errs := make(chan error, len(cmdsMap)) + for hash, cmds := range cmdsMap { wg.Add(1) go func(hash string, cmds []Cmder) { @@ -780,16 +849,24 @@ func (c *Ring) generalProcessPipeline( return } + hook := shard.Client.processPipelineHook if tx { cmds = wrapMultiExec(ctx, cmds) - _ = shard.Client.processTxPipelineHook(ctx, cmds) - } else { - _ = shard.Client.processPipelineHook(ctx, cmds) + hook = shard.Client.processTxPipelineHook + } + + if err = hook(ctx, cmds); err != nil { + errs <- err } }(hash, cmds) } wg.Wait() + close(errs) + + if err := <-errs; err != nil { + return err + } return cmdsFirstErr(cmds) } @@ -802,7 +879,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er for _, key := range keys { if key != "" { - shard, err := c.sharding.GetByKey(hashtag.Key(key)) + shard, err := c.sharding.GetByKey(key) if err != nil { return err } @@ -836,3 +913,26 @@ func (c *Ring) Close() error { return c.sharding.Close() } + +// GetShardClients returns a list of all shard clients in the ring. +// This can be used to create dedicated connections (e.g., PubSub) for each shard. 
+func (c *Ring) GetShardClients() []*Client { + shards := c.sharding.List() + clients := make([]*Client, 0, len(shards)) + for _, shard := range shards { + if shard.IsUp() { + clients = append(clients, shard.Client) + } + } + return clients +} + +// GetShardClientForKey returns the shard client that would handle the given key. +// This can be used to determine which shard a particular key/channel would be routed to. +func (c *Ring) GetShardClientForKey(key string) (*Client, error) { + shard, err := c.sharding.GetByKey(key) + if err != nil { + return nil, err + } + return shard.Client, nil +} diff --git a/vendor/github.com/redis/go-redis/v9/search_builders.go b/vendor/github.com/redis/go-redis/v9/search_builders.go new file mode 100644 index 000000000..91f063404 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/search_builders.go @@ -0,0 +1,825 @@ +package redis + +import ( + "context" +) + +// ---------------------- +// Search Module Builders +// ---------------------- + +// SearchBuilder provides a fluent API for FT.SEARCH +// (see original FTSearchOptions for all options). +// EXPERIMENTAL: this API is subject to change, use with caution. +type SearchBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTSearchOptions +} + +// NewSearchBuilder creates a new SearchBuilder for FT.SEARCH commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewSearchBuilder(ctx context.Context, index, query string) *SearchBuilder { + b := &SearchBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTSearchOptions{LimitOffset: -1}} + return b +} + +// WithScores includes WITHSCORES. +func (b *SearchBuilder) WithScores() *SearchBuilder { + b.options.WithScores = true + return b +} + +// NoContent includes NOCONTENT. +func (b *SearchBuilder) NoContent() *SearchBuilder { b.options.NoContent = true; return b } + +// Verbatim includes VERBATIM. +func (b *SearchBuilder) Verbatim() *SearchBuilder { b.options.Verbatim = true; return b } + +// NoStopWords includes NOSTOPWORDS. +func (b *SearchBuilder) NoStopWords() *SearchBuilder { b.options.NoStopWords = true; return b } + +// WithPayloads includes WITHPAYLOADS. +func (b *SearchBuilder) WithPayloads() *SearchBuilder { + b.options.WithPayloads = true + return b +} + +// WithSortKeys includes WITHSORTKEYS. +func (b *SearchBuilder) WithSortKeys() *SearchBuilder { + b.options.WithSortKeys = true + return b +} + +// Filter adds a FILTER clause: FILTER . +func (b *SearchBuilder) Filter(field string, min, max interface{}) *SearchBuilder { + b.options.Filters = append(b.options.Filters, FTSearchFilter{ + FieldName: field, + Min: min, + Max: max, + }) + return b +} + +// GeoFilter adds a GEOFILTER clause: GEOFILTER . +func (b *SearchBuilder) GeoFilter(field string, lon, lat, radius float64, unit string) *SearchBuilder { + b.options.GeoFilter = append(b.options.GeoFilter, FTSearchGeoFilter{ + FieldName: field, + Longitude: lon, + Latitude: lat, + Radius: radius, + Unit: unit, + }) + return b +} + +// InKeys restricts the search to the given keys. +func (b *SearchBuilder) InKeys(keys ...interface{}) *SearchBuilder { + b.options.InKeys = append(b.options.InKeys, keys...) + return b +} + +// InFields restricts the search to the given fields. +func (b *SearchBuilder) InFields(fields ...interface{}) *SearchBuilder { + b.options.InFields = append(b.options.InFields, fields...) + return b +} + +// ReturnFields adds simple RETURN ... 
+func (b *SearchBuilder) ReturnFields(fields ...string) *SearchBuilder { + for _, f := range fields { + b.options.Return = append(b.options.Return, FTSearchReturn{FieldName: f}) + } + return b +} + +// ReturnAs adds RETURN AS . +func (b *SearchBuilder) ReturnAs(field, alias string) *SearchBuilder { + b.options.Return = append(b.options.Return, FTSearchReturn{FieldName: field, As: alias}) + return b +} + +// Slop adds SLOP . +func (b *SearchBuilder) Slop(slop int) *SearchBuilder { + b.options.Slop = slop + return b +} + +// Timeout adds TIMEOUT . +func (b *SearchBuilder) Timeout(timeout int) *SearchBuilder { + b.options.Timeout = timeout + return b +} + +// InOrder includes INORDER. +func (b *SearchBuilder) InOrder() *SearchBuilder { + b.options.InOrder = true + return b +} + +// Language sets LANGUAGE . +func (b *SearchBuilder) Language(lang string) *SearchBuilder { + b.options.Language = lang + return b +} + +// Expander sets EXPANDER . +func (b *SearchBuilder) Expander(expander string) *SearchBuilder { + b.options.Expander = expander + return b +} + +// Scorer sets SCORER . +func (b *SearchBuilder) Scorer(scorer string) *SearchBuilder { + b.options.Scorer = scorer + return b +} + +// ExplainScore includes EXPLAINSCORE. +func (b *SearchBuilder) ExplainScore() *SearchBuilder { + b.options.ExplainScore = true + return b +} + +// Payload sets PAYLOAD . +func (b *SearchBuilder) Payload(payload string) *SearchBuilder { + b.options.Payload = payload + return b +} + +// SortBy adds SORTBY ASC|DESC. +func (b *SearchBuilder) SortBy(field string, asc bool) *SearchBuilder { + b.options.SortBy = append(b.options.SortBy, FTSearchSortBy{ + FieldName: field, + Asc: asc, + Desc: !asc, + }) + return b +} + +// WithSortByCount includes WITHCOUNT (when used with SortBy). +func (b *SearchBuilder) WithSortByCount() *SearchBuilder { + b.options.SortByWithCount = true + return b +} + +// Param adds a single PARAMS . +func (b *SearchBuilder) Param(key string, value interface{}) *SearchBuilder { + if b.options.Params == nil { + b.options.Params = make(map[string]interface{}, 1) + } + b.options.Params[key] = value + return b +} + +// ParamsMap adds multiple PARAMS at once. +func (b *SearchBuilder) ParamsMap(p map[string]interface{}) *SearchBuilder { + if b.options.Params == nil { + b.options.Params = make(map[string]interface{}, len(p)) + } + for k, v := range p { + b.options.Params[k] = v + } + return b +} + +// Dialect sets DIALECT . +func (b *SearchBuilder) Dialect(version int) *SearchBuilder { + b.options.DialectVersion = version + return b +} + +// Limit sets OFFSET and COUNT. CountOnly uses LIMIT 0 0. +func (b *SearchBuilder) Limit(offset, count int) *SearchBuilder { + b.options.LimitOffset = offset + b.options.Limit = count + return b +} +func (b *SearchBuilder) CountOnly() *SearchBuilder { b.options.CountOnly = true; return b } + +// Run executes FT.SEARCH and returns a typed result. +func (b *SearchBuilder) Run() (FTSearchResult, error) { + cmd := b.c.FTSearchWithArgs(b.ctx, b.index, b.query, b.options) + return cmd.Result() +} + +// ---------------------- +// AggregateBuilder for FT.AGGREGATE +// ---------------------- + +type AggregateBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTAggregateOptions +} + +// NewAggregateBuilder creates a new AggregateBuilder for FT.AGGREGATE commands. +// EXPERIMENTAL: this API is subject to change, use with caution. 
+func (c *Client) NewAggregateBuilder(ctx context.Context, index, query string) *AggregateBuilder { + return &AggregateBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTAggregateOptions{LimitOffset: -1}} +} + +// Verbatim includes VERBATIM. +func (b *AggregateBuilder) Verbatim() *AggregateBuilder { b.options.Verbatim = true; return b } + +// AddScores includes ADDSCORES. +func (b *AggregateBuilder) AddScores() *AggregateBuilder { b.options.AddScores = true; return b } + +// Scorer sets SCORER . +func (b *AggregateBuilder) Scorer(s string) *AggregateBuilder { + b.options.Scorer = s + return b +} + +// LoadAll includes LOAD * (mutually exclusive with Load). +func (b *AggregateBuilder) LoadAll() *AggregateBuilder { + b.options.LoadAll = true + return b +} + +// Load adds LOAD [AS alias]... +// You can call it multiple times for multiple fields. +func (b *AggregateBuilder) Load(field string, alias ...string) *AggregateBuilder { + // each Load entry becomes one element in options.Load + l := FTAggregateLoad{Field: field} + if len(alias) > 0 { + l.As = alias[0] + } + b.options.Load = append(b.options.Load, l) + return b +} + +// Timeout sets TIMEOUT . +func (b *AggregateBuilder) Timeout(ms int) *AggregateBuilder { + b.options.Timeout = ms + return b +} + +// Apply adds APPLY [AS alias]. +func (b *AggregateBuilder) Apply(field string, alias ...string) *AggregateBuilder { + a := FTAggregateApply{Field: field} + if len(alias) > 0 { + a.As = alias[0] + } + b.options.Apply = append(b.options.Apply, a) + return b +} + +// GroupBy starts a new GROUPBY clause. +func (b *AggregateBuilder) GroupBy(fields ...interface{}) *AggregateBuilder { + b.options.GroupBy = append(b.options.GroupBy, FTAggregateGroupBy{ + Fields: fields, + }) + return b +} + +// Reduce adds a REDUCE [<#args> ] clause to the *last* GROUPBY. +func (b *AggregateBuilder) Reduce(fn SearchAggregator, args ...interface{}) *AggregateBuilder { + if len(b.options.GroupBy) == 0 { + // no GROUPBY yet — nothing to attach to + return b + } + idx := len(b.options.GroupBy) - 1 + b.options.GroupBy[idx].Reduce = append(b.options.GroupBy[idx].Reduce, FTAggregateReducer{ + Reducer: fn, + Args: args, + }) + return b +} + +// ReduceAs does the same but also sets an alias: REDUCE … AS +func (b *AggregateBuilder) ReduceAs(fn SearchAggregator, alias string, args ...interface{}) *AggregateBuilder { + if len(b.options.GroupBy) == 0 { + return b + } + idx := len(b.options.GroupBy) - 1 + b.options.GroupBy[idx].Reduce = append(b.options.GroupBy[idx].Reduce, FTAggregateReducer{ + Reducer: fn, + Args: args, + As: alias, + }) + return b +} + +// SortBy adds SORTBY ASC|DESC. +func (b *AggregateBuilder) SortBy(field string, asc bool) *AggregateBuilder { + sb := FTAggregateSortBy{FieldName: field, Asc: asc, Desc: !asc} + b.options.SortBy = append(b.options.SortBy, sb) + return b +} + +// SortByMax sets MAX (only if SortBy was called). +func (b *AggregateBuilder) SortByMax(max int) *AggregateBuilder { + b.options.SortByMax = max + return b +} + +// Filter sets FILTER . +func (b *AggregateBuilder) Filter(expr string) *AggregateBuilder { + b.options.Filter = expr + return b +} + +// WithCursor enables WITHCURSOR [COUNT ] [MAXIDLE ]. 
+func (b *AggregateBuilder) WithCursor(count, maxIdle int) *AggregateBuilder { + b.options.WithCursor = true + if b.options.WithCursorOptions == nil { + b.options.WithCursorOptions = &FTAggregateWithCursor{} + } + b.options.WithCursorOptions.Count = count + b.options.WithCursorOptions.MaxIdle = maxIdle + return b +} + +// Params adds PARAMS pairs. +func (b *AggregateBuilder) Params(p map[string]interface{}) *AggregateBuilder { + if b.options.Params == nil { + b.options.Params = make(map[string]interface{}, len(p)) + } + for k, v := range p { + b.options.Params[k] = v + } + return b +} + +// Dialect sets DIALECT . +func (b *AggregateBuilder) Dialect(version int) *AggregateBuilder { + b.options.DialectVersion = version + return b +} + +// Run executes FT.AGGREGATE and returns a typed result. +func (b *AggregateBuilder) Run() (*FTAggregateResult, error) { + cmd := b.c.FTAggregateWithArgs(b.ctx, b.index, b.query, b.options) + return cmd.Result() +} + +// ---------------------- +// CreateIndexBuilder for FT.CREATE +// ---------------------- +// CreateIndexBuilder is builder for FT.CREATE +// EXPERIMENTAL: this API is subject to change, use with caution. +type CreateIndexBuilder struct { + c *Client + ctx context.Context + index string + options *FTCreateOptions + schema []*FieldSchema +} + +// NewCreateIndexBuilder creates a new CreateIndexBuilder for FT.CREATE commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewCreateIndexBuilder(ctx context.Context, index string) *CreateIndexBuilder { + return &CreateIndexBuilder{c: c, ctx: ctx, index: index, options: &FTCreateOptions{}} +} + +// OnHash sets ON HASH. +func (b *CreateIndexBuilder) OnHash() *CreateIndexBuilder { b.options.OnHash = true; return b } + +// OnJSON sets ON JSON. +func (b *CreateIndexBuilder) OnJSON() *CreateIndexBuilder { b.options.OnJSON = true; return b } + +// Prefix sets PREFIX. +func (b *CreateIndexBuilder) Prefix(prefixes ...interface{}) *CreateIndexBuilder { + b.options.Prefix = prefixes + return b +} + +// Filter sets FILTER. +func (b *CreateIndexBuilder) Filter(filter string) *CreateIndexBuilder { + b.options.Filter = filter + return b +} + +// DefaultLanguage sets LANGUAGE. +func (b *CreateIndexBuilder) DefaultLanguage(lang string) *CreateIndexBuilder { + b.options.DefaultLanguage = lang + return b +} + +// LanguageField sets LANGUAGE_FIELD. +func (b *CreateIndexBuilder) LanguageField(field string) *CreateIndexBuilder { + b.options.LanguageField = field + return b +} + +// Score sets SCORE. +func (b *CreateIndexBuilder) Score(score float64) *CreateIndexBuilder { + b.options.Score = score + return b +} + +// ScoreField sets SCORE_FIELD. +func (b *CreateIndexBuilder) ScoreField(field string) *CreateIndexBuilder { + b.options.ScoreField = field + return b +} + +// PayloadField sets PAYLOAD_FIELD. +func (b *CreateIndexBuilder) PayloadField(field string) *CreateIndexBuilder { + b.options.PayloadField = field + return b +} + +// NoOffsets includes NOOFFSETS. +func (b *CreateIndexBuilder) NoOffsets() *CreateIndexBuilder { b.options.NoOffsets = true; return b } + +// Temporary sets TEMPORARY seconds. +func (b *CreateIndexBuilder) Temporary(sec int) *CreateIndexBuilder { + b.options.Temporary = sec + return b +} + +// NoHL includes NOHL. +func (b *CreateIndexBuilder) NoHL() *CreateIndexBuilder { b.options.NoHL = true; return b } + +// NoFields includes NOFIELDS. 
+func (b *CreateIndexBuilder) NoFields() *CreateIndexBuilder { b.options.NoFields = true; return b } + +// NoFreqs includes NOFREQS. +func (b *CreateIndexBuilder) NoFreqs() *CreateIndexBuilder { b.options.NoFreqs = true; return b } + +// StopWords sets STOPWORDS. +func (b *CreateIndexBuilder) StopWords(words ...interface{}) *CreateIndexBuilder { + b.options.StopWords = words + return b +} + +// SkipInitialScan includes SKIPINITIALSCAN. +func (b *CreateIndexBuilder) SkipInitialScan() *CreateIndexBuilder { + b.options.SkipInitialScan = true + return b +} + +// Schema adds a FieldSchema. +func (b *CreateIndexBuilder) Schema(field *FieldSchema) *CreateIndexBuilder { + b.schema = append(b.schema, field) + return b +} + +// Run executes FT.CREATE and returns the status. +func (b *CreateIndexBuilder) Run() (string, error) { + cmd := b.c.FTCreate(b.ctx, b.index, b.options, b.schema...) + return cmd.Result() +} + +// ---------------------- +// DropIndexBuilder for FT.DROPINDEX +// ---------------------- +// DropIndexBuilder is a builder for FT.DROPINDEX +// EXPERIMENTAL: this API is subject to change, use with caution. +type DropIndexBuilder struct { + c *Client + ctx context.Context + index string + options *FTDropIndexOptions +} + +// NewDropIndexBuilder creates a new DropIndexBuilder for FT.DROPINDEX commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewDropIndexBuilder(ctx context.Context, index string) *DropIndexBuilder { + return &DropIndexBuilder{c: c, ctx: ctx, index: index} +} + +// DeleteRuncs includes DD. +func (b *DropIndexBuilder) DeleteDocs() *DropIndexBuilder { b.options.DeleteDocs = true; return b } + +// Run executes FT.DROPINDEX. +func (b *DropIndexBuilder) Run() (string, error) { + cmd := b.c.FTDropIndexWithArgs(b.ctx, b.index, b.options) + return cmd.Result() +} + +// ---------------------- +// AliasBuilder for FT.ALIAS* commands +// ---------------------- +// AliasBuilder is builder for FT.ALIAS* commands +// EXPERIMENTAL: this API is subject to change, use with caution. +type AliasBuilder struct { + c *Client + ctx context.Context + alias string + index string + action string // add|del|update +} + +// NewAliasBuilder creates a new AliasBuilder for FT.ALIAS* commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewAliasBuilder(ctx context.Context, alias string) *AliasBuilder { + return &AliasBuilder{c: c, ctx: ctx, alias: alias} +} + +// Action sets the action for the alias builder. +func (b *AliasBuilder) Action(action string) *AliasBuilder { + b.action = action + return b +} + +// Add sets the action to "add" and requires an index. +func (b *AliasBuilder) Add(index string) *AliasBuilder { + b.action = "add" + b.index = index + return b +} + +// Del sets the action to "del". +func (b *AliasBuilder) Del() *AliasBuilder { + b.action = "del" + return b +} + +// Update sets the action to "update" and requires an index. +func (b *AliasBuilder) Update(index string) *AliasBuilder { + b.action = "update" + b.index = index + return b +} + +// Run executes the configured alias command. 
+func (b *AliasBuilder) Run() (string, error) { + switch b.action { + case "add": + cmd := b.c.FTAliasAdd(b.ctx, b.index, b.alias) + return cmd.Result() + case "del": + cmd := b.c.FTAliasDel(b.ctx, b.alias) + return cmd.Result() + case "update": + cmd := b.c.FTAliasUpdate(b.ctx, b.index, b.alias) + return cmd.Result() + } + return "", nil +} + +// ---------------------- +// ExplainBuilder for FT.EXPLAIN +// ---------------------- +// ExplainBuilder is builder for FT.EXPLAIN +// EXPERIMENTAL: this API is subject to change, use with caution. +type ExplainBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTExplainOptions +} + +// NewExplainBuilder creates a new ExplainBuilder for FT.EXPLAIN commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewExplainBuilder(ctx context.Context, index, query string) *ExplainBuilder { + return &ExplainBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTExplainOptions{}} +} + +// Dialect sets dialect for EXPLAINCLI. +func (b *ExplainBuilder) Dialect(d string) *ExplainBuilder { b.options.Dialect = d; return b } + +// Run executes FT.EXPLAIN and returns the plan. +func (b *ExplainBuilder) Run() (string, error) { + cmd := b.c.FTExplainWithArgs(b.ctx, b.index, b.query, b.options) + return cmd.Result() +} + +// ---------------------- +// InfoBuilder for FT.INFO +// ---------------------- + +type FTInfoBuilder struct { + c *Client + ctx context.Context + index string +} + +// NewSearchInfoBuilder creates a new FTInfoBuilder for FT.INFO commands. +func (c *Client) NewSearchInfoBuilder(ctx context.Context, index string) *FTInfoBuilder { + return &FTInfoBuilder{c: c, ctx: ctx, index: index} +} + +// Run executes FT.INFO and returns detailed info. +func (b *FTInfoBuilder) Run() (FTInfoResult, error) { + cmd := b.c.FTInfo(b.ctx, b.index) + return cmd.Result() +} + +// ---------------------- +// SpellCheckBuilder for FT.SPELLCHECK +// ---------------------- +// SpellCheckBuilder is builder for FT.SPELLCHECK +// EXPERIMENTAL: this API is subject to change, use with caution. +type SpellCheckBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTSpellCheckOptions +} + +// NewSpellCheckBuilder creates a new SpellCheckBuilder for FT.SPELLCHECK commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewSpellCheckBuilder(ctx context.Context, index, query string) *SpellCheckBuilder { + return &SpellCheckBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTSpellCheckOptions{}} +} + +// Distance sets MAXDISTANCE. +func (b *SpellCheckBuilder) Distance(d int) *SpellCheckBuilder { b.options.Distance = d; return b } + +// Terms sets INCLUDE or EXCLUDE terms. +func (b *SpellCheckBuilder) Terms(include bool, dictionary string, terms ...interface{}) *SpellCheckBuilder { + if b.options.Terms == nil { + b.options.Terms = &FTSpellCheckTerms{} + } + if include { + b.options.Terms.Inclusion = "INCLUDE" + } else { + b.options.Terms.Inclusion = "EXCLUDE" + } + b.options.Terms.Dictionary = dictionary + b.options.Terms.Terms = terms + return b +} + +// Dialect sets dialect version. +func (b *SpellCheckBuilder) Dialect(d int) *SpellCheckBuilder { b.options.Dialect = d; return b } + +// Run executes FT.SPELLCHECK and returns suggestions. 
+func (b *SpellCheckBuilder) Run() ([]SpellCheckResult, error) { + cmd := b.c.FTSpellCheckWithArgs(b.ctx, b.index, b.query, b.options) + return cmd.Result() +} + +// ---------------------- +// DictBuilder for FT.DICT* commands +// ---------------------- +// DictBuilder is builder for FT.DICT* commands +// EXPERIMENTAL: this API is subject to change, use with caution. +type DictBuilder struct { + c *Client + ctx context.Context + dict string + terms []interface{} + action string // add|del|dump +} + +// NewDictBuilder creates a new DictBuilder for FT.DICT* commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewDictBuilder(ctx context.Context, dict string) *DictBuilder { + return &DictBuilder{c: c, ctx: ctx, dict: dict} +} + +// Action sets the action for the dictionary builder. +func (b *DictBuilder) Action(action string) *DictBuilder { + b.action = action + return b +} + +// Add sets the action to "add" and requires terms. +func (b *DictBuilder) Add(terms ...interface{}) *DictBuilder { + b.action = "add" + b.terms = terms + return b +} + +// Del sets the action to "del" and requires terms. +func (b *DictBuilder) Del(terms ...interface{}) *DictBuilder { + b.action = "del" + b.terms = terms + return b +} + +// Dump sets the action to "dump". +func (b *DictBuilder) Dump() *DictBuilder { + b.action = "dump" + return b +} + +// Run executes the configured dictionary command. +func (b *DictBuilder) Run() (interface{}, error) { + switch b.action { + case "add": + cmd := b.c.FTDictAdd(b.ctx, b.dict, b.terms...) + return cmd.Result() + case "del": + cmd := b.c.FTDictDel(b.ctx, b.dict, b.terms...) + return cmd.Result() + case "dump": + cmd := b.c.FTDictDump(b.ctx, b.dict) + return cmd.Result() + } + return nil, nil +} + +// ---------------------- +// TagValsBuilder for FT.TAGVALS +// ---------------------- +// TagValsBuilder is builder for FT.TAGVALS +// EXPERIMENTAL: this API is subject to change, use with caution. +type TagValsBuilder struct { + c *Client + ctx context.Context + index string + field string +} + +// NewTagValsBuilder creates a new TagValsBuilder for FT.TAGVALS commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewTagValsBuilder(ctx context.Context, index, field string) *TagValsBuilder { + return &TagValsBuilder{c: c, ctx: ctx, index: index, field: field} +} + +// Run executes FT.TAGVALS and returns tag values. +func (b *TagValsBuilder) Run() ([]string, error) { + cmd := b.c.FTTagVals(b.ctx, b.index, b.field) + return cmd.Result() +} + +// ---------------------- +// CursorBuilder for FT.CURSOR* +// ---------------------- +// CursorBuilder is builder for FT.CURSOR* commands +// EXPERIMENTAL: this API is subject to change, use with caution. +type CursorBuilder struct { + c *Client + ctx context.Context + index string + cursorId int64 + count int + action string // read|del +} + +// NewCursorBuilder creates a new CursorBuilder for FT.CURSOR* commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewCursorBuilder(ctx context.Context, index string, cursorId int64) *CursorBuilder { + return &CursorBuilder{c: c, ctx: ctx, index: index, cursorId: cursorId} +} + +// Action sets the action for the cursor builder. +func (b *CursorBuilder) Action(action string) *CursorBuilder { + b.action = action + return b +} + +// Read sets the action to "read". +func (b *CursorBuilder) Read() *CursorBuilder { + b.action = "read" + return b +} + +// Del sets the action to "del". 
+func (b *CursorBuilder) Del() *CursorBuilder { + b.action = "del" + return b +} + +// Count for READ. +func (b *CursorBuilder) Count(count int) *CursorBuilder { b.count = count; return b } + +// Run executes the cursor command. +func (b *CursorBuilder) Run() (interface{}, error) { + switch b.action { + case "read": + cmd := b.c.FTCursorRead(b.ctx, b.index, int(b.cursorId), b.count) + return cmd.Result() + case "del": + cmd := b.c.FTCursorDel(b.ctx, b.index, int(b.cursorId)) + return cmd.Result() + } + return nil, nil +} + +// ---------------------- +// SynUpdateBuilder for FT.SYNUPDATE +// ---------------------- +// SyncUpdateBuilder is builder for FT.SYNCUPDATE +// EXPERIMENTAL: this API is subject to change, use with caution. +type SynUpdateBuilder struct { + c *Client + ctx context.Context + index string + groupId interface{} + options *FTSynUpdateOptions + terms []interface{} +} + +// NewSynUpdateBuilder creates a new SynUpdateBuilder for FT.SYNUPDATE commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewSynUpdateBuilder(ctx context.Context, index string, groupId interface{}) *SynUpdateBuilder { + return &SynUpdateBuilder{c: c, ctx: ctx, index: index, groupId: groupId, options: &FTSynUpdateOptions{}} +} + +// SkipInitialScan includes SKIPINITIALSCAN. +func (b *SynUpdateBuilder) SkipInitialScan() *SynUpdateBuilder { + b.options.SkipInitialScan = true + return b +} + +// Terms adds synonyms to the group. +func (b *SynUpdateBuilder) Terms(terms ...interface{}) *SynUpdateBuilder { b.terms = terms; return b } + +// Run executes FT.SYNUPDATE. +func (b *SynUpdateBuilder) Run() (string, error) { + cmd := b.c.FTSynUpdateWithArgs(b.ctx, b.index, b.groupId, b.options, b.terms) + return cmd.Result() +} diff --git a/vendor/github.com/redis/go-redis/v9/search_commands.go b/vendor/github.com/redis/go-redis/v9/search_commands.go new file mode 100644 index 000000000..9018b3ded --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/search_commands.go @@ -0,0 +1,2728 @@ +package redis + +import ( + "context" + "fmt" + "strconv" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" +) + +type SearchCmdable interface { + FT_List(ctx context.Context) *StringSliceCmd + FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd + FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd + FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd + FTAliasDel(ctx context.Context, alias string) *StatusCmd + FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd + FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd + FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd + FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd + FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd + FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd + FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd + FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDump(ctx context.Context, dict string) *StringSliceCmd + FTDropIndex(ctx context.Context, index string) *StatusCmd + FTDropIndexWithArgs(ctx context.Context, index string, 
options *FTDropIndexOptions) *StatusCmd + FTExplain(ctx context.Context, index string, query string) *StringCmd + FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd + FTHybrid(ctx context.Context, index string, searchExpr string, vectorField string, vectorData Vector) *FTHybridCmd + FTHybridWithArgs(ctx context.Context, index string, options *FTHybridOptions) *FTHybridCmd + FTInfo(ctx context.Context, index string) *FTInfoCmd + FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd + FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd + FTSearch(ctx context.Context, index string, query string) *FTSearchCmd + FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd + FTSynDump(ctx context.Context, index string) *FTSynDumpCmd + FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd + FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd + FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd +} + +type FTCreateOptions struct { + OnHash bool + OnJSON bool + Prefix []interface{} + Filter string + DefaultLanguage string + LanguageField string + Score float64 + ScoreField string + PayloadField string + MaxTextFields int + NoOffsets bool + Temporary int + NoHL bool + NoFields bool + NoFreqs bool + StopWords []interface{} + SkipInitialScan bool +} + +type FieldSchema struct { + FieldName string + As string + FieldType SearchFieldType + Sortable bool + UNF bool + NoStem bool + NoIndex bool + PhoneticMatcher string + Weight float64 + Separator string + CaseSensitive bool + WithSuffixtrie bool + VectorArgs *FTVectorArgs + GeoShapeFieldType string + IndexEmpty bool + IndexMissing bool +} + +type FTVectorArgs struct { + FlatOptions *FTFlatOptions + HNSWOptions *FTHNSWOptions + VamanaOptions *FTVamanaOptions +} + +type FTFlatOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + BlockSize int +} + +type FTHNSWOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + MaxEdgesPerNode int + MaxAllowedEdgesPerNode int + EFRunTime int + Epsilon float64 +} + +type FTVamanaOptions struct { + Type string + Dim int + DistanceMetric string + Compression string + ConstructionWindowSize int + GraphMaxDegree int + SearchWindowSize int + Epsilon float64 + TrainingThreshold int + ReduceDim int +} + +type FTDropIndexOptions struct { + DeleteDocs bool +} + +type SpellCheckTerms struct { + Include bool + Exclude bool + Dictionary string +} + +type FTExplainOptions struct { + // Dialect 1,3 and 4 are deprecated since redis 8.0 + Dialect string +} + +type FTSynUpdateOptions struct { + SkipInitialScan bool +} + +type SearchAggregator int + +const ( + SearchInvalid = SearchAggregator(iota) + SearchAvg + SearchSum + SearchMin + SearchMax + SearchCount + SearchCountDistinct + SearchCountDistinctish + SearchStdDev + SearchQuantile + SearchToList + SearchFirstValue + SearchRandomSample +) + +func (a SearchAggregator) String() string { + switch a { + case SearchInvalid: + return "" + case SearchAvg: + return "AVG" + case SearchSum: + return "SUM" + case SearchMin: + return "MIN" + case SearchMax: + return "MAX" + case SearchCount: + return "COUNT" + case SearchCountDistinct: + return "COUNT_DISTINCT" + case 
SearchCountDistinctish: + return "COUNT_DISTINCTISH" + case SearchStdDev: + return "STDDEV" + case SearchQuantile: + return "QUANTILE" + case SearchToList: + return "TOLIST" + case SearchFirstValue: + return "FIRST_VALUE" + case SearchRandomSample: + return "RANDOM_SAMPLE" + default: + return "" + } +} + +type SearchFieldType int + +const ( + SearchFieldTypeInvalid = SearchFieldType(iota) + SearchFieldTypeNumeric + SearchFieldTypeTag + SearchFieldTypeText + SearchFieldTypeGeo + SearchFieldTypeVector + SearchFieldTypeGeoShape +) + +func (t SearchFieldType) String() string { + switch t { + case SearchFieldTypeInvalid: + return "" + case SearchFieldTypeNumeric: + return "NUMERIC" + case SearchFieldTypeTag: + return "TAG" + case SearchFieldTypeText: + return "TEXT" + case SearchFieldTypeGeo: + return "GEO" + case SearchFieldTypeVector: + return "VECTOR" + case SearchFieldTypeGeoShape: + return "GEOSHAPE" + default: + return "TEXT" + } +} + +// Each AggregateReducer have different args. +// Please follow https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information. +type FTAggregateReducer struct { + Reducer SearchAggregator + Args []interface{} + As string +} + +type FTAggregateGroupBy struct { + Fields []interface{} + Reduce []FTAggregateReducer +} + +type FTAggregateSortBy struct { + FieldName string + Asc bool + Desc bool +} + +type FTAggregateApply struct { + Field string + As string +} + +type FTAggregateLoad struct { + Field string + As string +} + +type FTAggregateWithCursor struct { + Count int + MaxIdle int +} + +type FTAggregateOptions struct { + Verbatim bool + LoadAll bool + Load []FTAggregateLoad + Timeout int + GroupBy []FTAggregateGroupBy + SortBy []FTAggregateSortBy + SortByMax int + // Scorer is used to set scoring function, if not set passed, a default will be used. + // The default scorer depends on the Redis version: + // - `BM25` for Redis >= 8 + // - `TFIDF` for Redis < 8 + Scorer string + // AddScores is available in Redis CE 8 + AddScores bool + Apply []FTAggregateApply + LimitOffset int + Limit int + Filter string + WithCursor bool + WithCursorOptions *FTAggregateWithCursor + Params map[string]interface{} + // Dialect 1,3 and 4 are deprecated since redis 8.0 + DialectVersion int +} + +type FTSearchFilter struct { + FieldName interface{} + Min interface{} + Max interface{} +} + +type FTSearchGeoFilter struct { + FieldName string + Longitude float64 + Latitude float64 + Radius float64 + Unit string +} + +type FTSearchReturn struct { + FieldName string + As string +} + +type FTSearchSortBy struct { + FieldName string + Asc bool + Desc bool +} + +// FTSearchOptions hold options that can be passed to the FT.SEARCH command. +// More information about the options can be found +// in the documentation for FT.SEARCH https://redis.io/docs/latest/commands/ft.search/ +type FTSearchOptions struct { + NoContent bool + Verbatim bool + NoStopWords bool + WithScores bool + WithPayloads bool + WithSortKeys bool + Filters []FTSearchFilter + GeoFilter []FTSearchGeoFilter + InKeys []interface{} + InFields []interface{} + Return []FTSearchReturn + Slop int + Timeout int + InOrder bool + Language string + Expander string + // Scorer is used to set scoring function, if not set passed, a default will be used. 
+ // The default scorer depends on the Redis version: + // - `BM25` for Redis >= 8 + // - `TFIDF` for Redis < 8 + Scorer string + ExplainScore bool + Payload string + SortBy []FTSearchSortBy + SortByWithCount bool + LimitOffset int + Limit int + // CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set. + // When using this option, the Limit and LimitOffset options are ignored. + CountOnly bool + Params map[string]interface{} + // Dialect 1,3 and 4 are deprecated since redis 8.0 + DialectVersion int +} + +// FTHybridCombineMethod represents the fusion method for combining search and vector results +type FTHybridCombineMethod string + +const ( + FTHybridCombineRRF FTHybridCombineMethod = "RRF" + FTHybridCombineLinear FTHybridCombineMethod = "LINEAR" + FTHybridCombineFunction FTHybridCombineMethod = "FUNCTION" +) + +// FTHybridSearchExpression represents a search expression in hybrid search +type FTHybridSearchExpression struct { + Query string + Scorer string + ScorerParams []interface{} + YieldScoreAs string +} + +type FTHybridVectorMethod = string + +const ( + KNN FTHybridCombineMethod = "KNN" + RANGE FTHybridCombineMethod = "RANGE" +) + +// FTHybridVectorExpression represents a vector expression in hybrid search +type FTHybridVectorExpression struct { + VectorField string + VectorData Vector + Method FTHybridVectorMethod + MethodParams []interface{} + Filter string + YieldScoreAs string +} + +// FTHybridCombineOptions represents options for result fusion +type FTHybridCombineOptions struct { + Method FTHybridCombineMethod + Count int + Window int // For RRF + Constant float64 // For RRF + Alpha float64 // For LINEAR + Beta float64 // For LINEAR + YieldScoreAs string +} + +// FTHybridGroupBy represents GROUP BY functionality +type FTHybridGroupBy struct { + Count int + Fields []string + ReduceFunc string + ReduceCount int + ReduceParams []interface{} +} + +// FTHybridApply represents APPLY functionality +type FTHybridApply struct { + Expression string + AsField string +} + +// FTHybridWithCursor represents cursor configuration for hybrid search +type FTHybridWithCursor struct { + Count int // Number of results to return per cursor read + MaxIdle int // Maximum idle time in milliseconds before cursor is automatically deleted +} + +// FTHybridOptions hold options that can be passed to the FT.HYBRID command +type FTHybridOptions struct { + CountExpressions int // Number of search/vector expressions + SearchExpressions []FTHybridSearchExpression // Multiple search expressions + VectorExpressions []FTHybridVectorExpression // Multiple vector expressions + Combine *FTHybridCombineOptions // Fusion step options + Load []string // Projected fields + GroupBy *FTHybridGroupBy // Aggregation grouping + Apply []FTHybridApply // Field transformations + SortBy []FTSearchSortBy // Reuse from FTSearch + Filter string // Post-filter expression + LimitOffset int // Result limiting + Limit int + Params map[string]interface{} // Parameter substitution + ExplainScore bool // Include score explanations + Timeout int // Runtime timeout + WithCursor bool // Enable cursor support for large result sets + WithCursorOptions *FTHybridWithCursor // Cursor configuration options +} + +type FTSynDumpResult struct { + Term string + Synonyms []string +} + +type FTSynDumpCmd struct { + baseCmd + val []FTSynDumpResult +} + +type FTAggregateResult struct { + Total int + Rows []AggregateRow +} + +type AggregateRow struct { + Fields map[string]interface{} +} + 
+type AggregateCmd struct { + baseCmd + val *FTAggregateResult +} + +type FTInfoResult struct { + IndexErrors IndexErrors + Attributes []FTAttribute + BytesPerRecordAvg string + Cleaning int + CursorStats CursorStats + DialectStats map[string]int + DocTableSizeMB float64 + FieldStatistics []FieldStatistic + GCStats GCStats + GeoshapesSzMB float64 + HashIndexingFailures int + IndexDefinition IndexDefinition + IndexName string + IndexOptions []string + Indexing int + InvertedSzMB float64 + KeyTableSizeMB float64 + MaxDocID int + NumDocs int + NumRecords int + NumTerms int + NumberOfUses int + OffsetBitsPerRecordAvg string + OffsetVectorsSzMB float64 + OffsetsPerTermAvg string + PercentIndexed float64 + RecordsPerDocAvg string + SortableValuesSizeMB float64 + TagOverheadSzMB float64 + TextOverheadSzMB float64 + TotalIndexMemorySzMB float64 + TotalIndexingTime int + TotalInvertedIndexBlocks int + VectorIndexSzMB float64 +} + +type IndexErrors struct { + IndexingFailures int + LastIndexingError string + LastIndexingErrorKey string +} + +type FTAttribute struct { + Identifier string + Attribute string + Type string + Weight float64 + Sortable bool + NoStem bool + NoIndex bool + UNF bool + PhoneticMatcher string + CaseSensitive bool + WithSuffixtrie bool + + // Vector specific attributes + Algorithm string + DataType string + Dim int + DistanceMetric string + M int + EFConstruction int +} + +type CursorStats struct { + GlobalIdle int + GlobalTotal int + IndexCapacity int + IndexTotal int +} + +type FieldStatistic struct { + Identifier string + Attribute string + IndexErrors IndexErrors +} + +type GCStats struct { + BytesCollected int + TotalMsRun int + TotalCycles int + AverageCycleTimeMs string + LastRunTimeMs int + GCNumericTreesMissed int + GCBlocksDenied int +} + +type IndexDefinition struct { + KeyType string + Prefixes []string + DefaultScore float64 +} + +type FTSpellCheckOptions struct { + Distance int + Terms *FTSpellCheckTerms + // Dialect 1,3 and 4 are deprecated since redis 8.0 + Dialect int +} + +type FTSpellCheckTerms struct { + Inclusion string // Either "INCLUDE" or "EXCLUDE" + Dictionary string + Terms []interface{} +} + +type SpellCheckResult struct { + Term string + Suggestions []SpellCheckSuggestion +} + +type SpellCheckSuggestion struct { + Score float64 + Suggestion string +} + +type FTSearchResult struct { + Total int + Docs []Document +} + +type Document struct { + ID string + Score *float64 + Payload *string + SortKey *string + Fields map[string]string + Error error +} + +type AggregateQuery []interface{} + +// FT_List - Lists all the existing indexes in the database. +// For more information, please refer to the Redis documentation: +// [FT._LIST]: (https://redis.io/commands/ft._list/) +func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT._LIST") + _ = c(ctx, cmd) + return cmd +} + +// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + cmd := NewMapStringInterfaceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func FTAggregateQuery(query string, options *FTAggregateOptions) (AggregateQuery, error) { + queryArgs := []interface{}{query} + if options != nil { + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + + if options.AddScores { + queryArgs = append(queryArgs, "ADDSCORES") + } + + if options.LoadAll && options.Load != nil { + return nil, fmt.Errorf("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + queryArgs = append(queryArgs, "LOAD", "*") + } + if options.Load != nil { + queryArgs = append(queryArgs, "LOAD", len(options.Load)) + index, count := len(queryArgs)-1, 0 + for _, load := range options.Load { + queryArgs = append(queryArgs, load.Field) + count++ + if load.As != "" { + queryArgs = append(queryArgs, "AS", load.As) + count += 2 + } + } + queryArgs[index] = count + } + + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + + for _, apply := range options.Apply { + queryArgs = append(queryArgs, "APPLY", apply.Field) + if apply.As != "" { + queryArgs = append(queryArgs, "AS", apply.As) + } + } + + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields)) + queryArgs = append(queryArgs, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + queryArgs = append(queryArgs, "REDUCE") + queryArgs = append(queryArgs, reducer.Reducer.String()) + if reducer.Args != nil { + queryArgs = append(queryArgs, len(reducer.Args)) + queryArgs = append(queryArgs, reducer.Args...) + } else { + queryArgs = append(queryArgs, 0) + } + if reducer.As != "" { + queryArgs = append(queryArgs, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + return nil, fmt.Errorf("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + queryArgs = append(queryArgs, len(sortByOptions)) + queryArgs = append(queryArgs, sortByOptions...) 
+ } + if options.SortByMax > 0 { + queryArgs = append(queryArgs, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + queryArgs = append(queryArgs, "FILTER", options.Filter) + } + if options.WithCursor { + queryArgs = append(queryArgs, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) + } + } + return queryArgs, nil +} + +func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) { + if len(data) == 0 { + return nil, fmt.Errorf("no data returned") + } + + total, ok := data[0].(int64) + if !ok { + return nil, fmt.Errorf("invalid total format") + } + + rows := make([]AggregateRow, 0, len(data)-1) + for _, row := range data[1:] { + fields, ok := row.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid row format") + } + + rowMap := make(map[string]interface{}) + for i := 0; i < len(fields); i += 2 { + key, ok := fields[i].(string) + if !ok { + return nil, fmt.Errorf("invalid field key format") + } + value := fields[i+1] + rowMap[key] = value + } + rows = append(rows, AggregateRow{Fields: rowMap}) + } + + result := &FTAggregateResult{ + Total: int(total), + Rows: rows, + } + return result, nil +} + +func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd { + return &AggregateCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) { + cmd.val = val +} + +func (cmd *AggregateCmd) Val() *FTAggregateResult { + return cmd.val +} + +func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) { + return cmd.val, cmd.err +} + +func (cmd *AggregateCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *AggregateCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *AggregateCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = ProcessAggregateResult(data) + if err != nil { + return err + } + return nil +} + +// FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion. 
+// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + if options != nil { + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.AddScores { + args = append(args, "ADDSCORES") + } + if options.LoadAll && options.Load != nil { + cmd := NewAggregateCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")) + return cmd + } + if options.LoadAll { + args = append(args, "LOAD", "*") + } + if options.Load != nil { + args = append(args, "LOAD", len(options.Load)) + index, count := len(args)-1, 0 + for _, load := range options.Load { + args = append(args, load.Field) + count++ + if load.As != "" { + args = append(args, "AS", load.As) + count += 2 + } + } + args[index] = count + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Field) + if apply.As != "" { + args = append(args, "AS", apply.As) + } + } + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + args = append(args, "GROUPBY", len(groupBy.Fields)) + args = append(args, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + args = append(args, "REDUCE") + args = append(args, reducer.Reducer.String()) + if reducer.Args != nil { + args = append(args, len(reducer.Args)) + args = append(args, reducer.Args...) + } else { + args = append(args, 0) + } + if reducer.As != "" { + args = append(args, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + args = append(args, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := NewAggregateCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.AGGREGATE: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, len(sortByOptions)) + args = append(args, sortByOptions...) + } + if options.SortByMax > 0 { + args = append(args, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) + } + } + + cmd := NewAggregateCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasAdd - Adds an alias to an index. 
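For orientation, FTAggregateWithArgs above maps one option field per FT.AGGREGATE clause (GROUPBY/REDUCE, SORTBY, LIMIT, PARAMS, and a DIALECT that defaults to 2). A minimal usage sketch against this vendored API; the index and field names are illustrative only, and the option struct names (FTAggregateGroupBy, FTAggregateReducer, FTAggregateSortBy, SearchCount) are the go-redis types this code consumes, not part of this diff:

	package main

	import (
		"context"
		"fmt"

		"github.com/redis/go-redis/v9"
	)

	func main() {
		ctx := context.Background()
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

		// FT.AGGREGATE idx:products * GROUPBY 1 @brand REDUCE COUNT 0 AS item_count
		//              SORTBY 2 @item_count DESC DIALECT 2
		res, err := rdb.FTAggregateWithArgs(ctx, "idx:products", "*", &redis.FTAggregateOptions{
			GroupBy: []redis.FTAggregateGroupBy{{
				Fields: []interface{}{"@brand"},
				Reduce: []redis.FTAggregateReducer{{Reducer: redis.SearchCount, As: "item_count"}},
			}},
			SortBy: []redis.FTAggregateSortBy{{FieldName: "@item_count", Desc: true}},
		}).Result()
		if err != nil {
			panic(err)
		}
		for _, row := range res.Rows {
			fmt.Println(row.Fields["brand"], row.Fields["item_count"])
		}
	}
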
+// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias. +// For more information, please refer to the Redis documentation: +// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/) +func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd { + args := []interface{}{"FT.ALIASADD", alias, index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasDel - Removes an alias from an index. +// The 'alias' parameter specifies the alias to be removed. +// For more information, please refer to the Redis documentation: +// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/) +func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasUpdate - Updates an alias to an index. +// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias. +// If the alias already exists for a different index, it updates the alias to point to the specified index instead. +// For more information, please refer to the Redis documentation: +// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/) +func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index) + _ = c(ctx, cmd) + return cmd +} + +// FTAlter - Alters the definition of an existing index. +// The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan. +// The 'definition' parameter specifies the new definition for the index. +// For more information, please refer to the Redis documentation: +// [FT.ALTER]: (https://redis.io/commands/ft.alter/) +func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd { + args := []interface{}{"FT.ALTER", index} + if skipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, "SCHEMA", "ADD") + args = append(args, definition...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Retrieves the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to retrieve. +// For more information, please refer to the Redis [FT.CONFIG GET] documentation. +// +// Deprecated: FTConfigGet is deprecated in Redis 8. +// All configuration will be done with the CONFIG GET command. +// For more information check [Client.ConfigGet] and [CONFIG GET Documentation] +// +// [CONFIG GET Documentation]: https://redis.io/commands/config-get/ +// [FT.CONFIG GET]: https://redis.io/commands/ft.config-get/ +func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { + cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) + _ = c(ctx, cmd) + return cmd +} + +// Sets the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. +// For more information, please refer to the Redis [FT.CONFIG SET] documentation. +// +// Deprecated: FTConfigSet is deprecated in Redis 8. +// All configuration will be done with the CONFIG SET command. 
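The alias commands are what make zero-downtime reindexing practical: build a new physical index, then repoint a stable alias that application code queries. A sketch reusing the ctx/rdb setup from the aggregate example; index and alias names are illustrative. Note the Go signatures take (index, alias) while the wire order is (alias, index), as the code above shows:

	func swapIndex(ctx context.Context, rdb *redis.Client) error {
		// First deployment: FT.ALIASADD products idx:products_v1
		if err := rdb.FTAliasAdd(ctx, "idx:products_v1", "products").Err(); err != nil {
			return err
		}
		// After a rebuild: FT.ALIASUPDATE products idx:products_v2
		// (repoints the alias even if it already targets another index)
		return rdb.FTAliasUpdate(ctx, "idx:products_v2", "products").Err()
	}
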
+// For more information check [Client.ConfigSet] and [CONFIG SET Documentation] +// +// [CONFIG SET Documentation]: https://redis.io/commands/config-set/ +// [FT.CONFIG SET]: https://redis.io/commands/ft.config-set/ +func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) + _ = c(ctx, cmd) + return cmd +} + +// FTCreate - Creates a new index with the given options and schema. +// The 'index' parameter specifies the name of the index to create. +// The 'options' parameter specifies various options for the index, such as: +// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc. +// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc. +// For more information, please refer to the Redis documentation: +// [FT.CREATE]: (https://redis.io/commands/ft.create/) +func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd { + args := []interface{}{"FT.CREATE", index} + if options != nil { + if options.OnHash && !options.OnJSON { + args = append(args, "ON", "HASH") + } + if options.OnJSON && !options.OnHash { + args = append(args, "ON", "JSON") + } + if options.OnHash && options.OnJSON { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: ON HASH and ON JSON are mutually exclusive")) + return cmd + } + if options.Prefix != nil { + args = append(args, "PREFIX", len(options.Prefix)) + args = append(args, options.Prefix...) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.DefaultLanguage != "" { + args = append(args, "LANGUAGE", options.DefaultLanguage) + } + if options.LanguageField != "" { + args = append(args, "LANGUAGE_FIELD", options.LanguageField) + } + if options.Score > 0 { + args = append(args, "SCORE", options.Score) + } + if options.ScoreField != "" { + args = append(args, "SCORE_FIELD", options.ScoreField) + } + if options.PayloadField != "" { + args = append(args, "PAYLOAD_FIELD", options.PayloadField) + } + if options.MaxTextFields > 0 { + args = append(args, "MAXTEXTFIELDS", options.MaxTextFields) + } + if options.NoOffsets { + args = append(args, "NOOFFSETS") + } + if options.Temporary > 0 { + args = append(args, "TEMPORARY", options.Temporary) + } + if options.NoHL { + args = append(args, "NOHL") + } + if options.NoFields { + args = append(args, "NOFIELDS") + } + if options.NoFreqs { + args = append(args, "NOFREQS") + } + if options.StopWords != nil { + args = append(args, "STOPWORDS", len(options.StopWords)) + args = append(args, options.StopWords...) + } + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + } + if schema == nil { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA is required")) + return cmd + } + args = append(args, "SCHEMA") + for _, schema := range schema { + if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldName and FieldType are required")) + return cmd + } + args = append(args, schema.FieldName) + if schema.As != "" { + args = append(args, "AS", schema.As) + } + args = append(args, schema.FieldType.String()) + if schema.VectorArgs != nil { + if schema.FieldType != SearchFieldTypeVector { + cmd := NewStatusCmd(ctx, args...) 
+ cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs")) + return cmd + } + // Check mutual exclusivity of vector options + optionCount := 0 + if schema.VectorArgs.FlatOptions != nil { + optionCount++ + } + if schema.VectorArgs.HNSWOptions != nil { + optionCount++ + } + if schema.VectorArgs.VamanaOptions != nil { + optionCount++ + } + if optionCount != 1 { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA VectorArgs must have exactly one of FlatOptions, HNSWOptions, or VamanaOptions")) + return cmd + } + if schema.VectorArgs.FlatOptions != nil { + args = append(args, "FLAT") + if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT")) + return cmd + } + flatArgs := []interface{}{ + "TYPE", schema.VectorArgs.FlatOptions.Type, + "DIM", schema.VectorArgs.FlatOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric, + } + if schema.VectorArgs.FlatOptions.InitialCapacity > 0 { + flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity) + } + if schema.VectorArgs.FlatOptions.BlockSize > 0 { + flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize) + } + args = append(args, len(flatArgs)) + args = append(args, flatArgs...) + } + if schema.VectorArgs.HNSWOptions != nil { + args = append(args, "HNSW") + if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW")) + return cmd + } + hnswArgs := []interface{}{ + "TYPE", schema.VectorArgs.HNSWOptions.Type, + "DIM", schema.VectorArgs.HNSWOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric, + } + if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 { + hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity) + } + if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.EFRunTime > 0 { + hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime) + } + if schema.VectorArgs.HNSWOptions.Epsilon > 0 { + hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon) + } + args = append(args, len(hnswArgs)) + args = append(args, hnswArgs...) + } + if schema.VectorArgs.VamanaOptions != nil { + args = append(args, "SVS-VAMANA") + if schema.VectorArgs.VamanaOptions.Type == "" || schema.VectorArgs.VamanaOptions.Dim == 0 || schema.VectorArgs.VamanaOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) 
+ cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR VAMANA")) + return cmd + } + vamanaArgs := []interface{}{ + "TYPE", schema.VectorArgs.VamanaOptions.Type, + "DIM", schema.VectorArgs.VamanaOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.VamanaOptions.DistanceMetric, + } + if schema.VectorArgs.VamanaOptions.Compression != "" { + vamanaArgs = append(vamanaArgs, "COMPRESSION", schema.VectorArgs.VamanaOptions.Compression) + } + if schema.VectorArgs.VamanaOptions.ConstructionWindowSize > 0 { + vamanaArgs = append(vamanaArgs, "CONSTRUCTION_WINDOW_SIZE", schema.VectorArgs.VamanaOptions.ConstructionWindowSize) + } + if schema.VectorArgs.VamanaOptions.GraphMaxDegree > 0 { + vamanaArgs = append(vamanaArgs, "GRAPH_MAX_DEGREE", schema.VectorArgs.VamanaOptions.GraphMaxDegree) + } + if schema.VectorArgs.VamanaOptions.SearchWindowSize > 0 { + vamanaArgs = append(vamanaArgs, "SEARCH_WINDOW_SIZE", schema.VectorArgs.VamanaOptions.SearchWindowSize) + } + if schema.VectorArgs.VamanaOptions.Epsilon > 0 { + vamanaArgs = append(vamanaArgs, "EPSILON", schema.VectorArgs.VamanaOptions.Epsilon) + } + if schema.VectorArgs.VamanaOptions.TrainingThreshold > 0 { + vamanaArgs = append(vamanaArgs, "TRAINING_THRESHOLD", schema.VectorArgs.VamanaOptions.TrainingThreshold) + } + if schema.VectorArgs.VamanaOptions.ReduceDim > 0 { + vamanaArgs = append(vamanaArgs, "REDUCE", schema.VectorArgs.VamanaOptions.ReduceDim) + } + args = append(args, len(vamanaArgs)) + args = append(args, vamanaArgs...) + } + } + if schema.GeoShapeFieldType != "" { + if schema.FieldType != SearchFieldTypeGeoShape { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType")) + return cmd + } + args = append(args, schema.GeoShapeFieldType) + } + if schema.NoStem { + args = append(args, "NOSTEM") + } + if schema.Sortable { + args = append(args, "SORTABLE") + } + if schema.UNF { + args = append(args, "UNF") + } + if schema.NoIndex { + args = append(args, "NOINDEX") + } + if schema.PhoneticMatcher != "" { + args = append(args, "PHONETIC", schema.PhoneticMatcher) + } + if schema.Weight > 0 { + args = append(args, "WEIGHT", schema.Weight) + } + if schema.Separator != "" { + args = append(args, "SEPARATOR", schema.Separator) + } + if schema.CaseSensitive { + args = append(args, "CASESENSITIVE") + } + if schema.WithSuffixtrie { + args = append(args, "WITHSUFFIXTRIE") + } + if schema.IndexEmpty { + args = append(args, "INDEXEMPTY") + } + if schema.IndexMissing { + args = append(args, "INDEXMISSING") + + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorDel - Deletes a cursor from an existing index. +// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete. +// For more information, please refer to the Redis documentation: +// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/) +func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorRead - Reads the next results from an existing cursor. +// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read. 
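A sketch of FTCreate as vendored above, indexing hashes under a prefix with text, tag, and HNSW vector fields. The struct fields match what the code above consumes; the FTVectorArgs/FTHNSWOptions type names come from the same vendored file, and the index layout (names, DIM 768) is illustrative:

	func createIndex(ctx context.Context, rdb *redis.Client) error {
		return rdb.FTCreate(ctx, "idx:docs",
			&redis.FTCreateOptions{OnHash: true, Prefix: []interface{}{"doc:"}},
			&redis.FieldSchema{FieldName: "title", FieldType: redis.SearchFieldTypeText, Weight: 5},
			&redis.FieldSchema{FieldName: "category", FieldType: redis.SearchFieldTypeTag, Sortable: true},
			&redis.FieldSchema{
				FieldName: "embedding",
				FieldType: redis.SearchFieldTypeVector,
				// Exactly one of FlatOptions/HNSWOptions/VamanaOptions may be set.
				VectorArgs: &redis.FTVectorArgs{HNSWOptions: &redis.FTHNSWOptions{
					Type:           "FLOAT32",
					Dim:            768,
					DistanceMetric: "COSINE",
				}},
			},
		).Err()
	}
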
+// For more information, please refer to the Redis documentation: +// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/) +func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd { + args := []interface{}{"FT.CURSOR", "READ", index, cursorId} + if count > 0 { + args = append(args, "COUNT", count) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictAdd - Adds terms to a dictionary. +// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add. +// For more information, please refer to the Redis documentation: +// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/) +func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTADD", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDel - Deletes terms from a dictionary. +// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete. +// For more information, please refer to the Redis documentation: +// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/) +func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTDEL", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDump - Returns all terms in the specified dictionary. +// The 'dict' parameter specifies the dictionary from which to return the terms. +// For more information, please refer to the Redis documentation: +// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/) +func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndex - Deletes an index. +// The 'index' parameter specifies the index to delete. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndexWithArgs - Deletes an index with options. +// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + if options != nil { + if options.DeleteDocs { + args = append(args, "DD") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplain - Returns the execution plan for a complex query. +// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string. 
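FTCursorRead and FTCursorDel pair with the WITHCURSOR flag on FT.AGGREGATE: the server returns a first page of results plus a cursor id that is drained page by page and then deleted. A sketch of that flow, assuming a cursor id already extracted from the aggregate reply (the extraction itself is elided, and FTAggregateWithCursor is the go-redis options struct behind the WITHCURSOR branch above, not something introduced by this diff):

	func drainCursor(ctx context.Context, rdb *redis.Client, cursorID int) error {
		// Request a cursor when aggregating: ... WITHCURSOR COUNT 100
		_ = rdb.FTAggregateWithArgs(ctx, "idx:docs", "*", &redis.FTAggregateOptions{
			WithCursor:        true,
			WithCursorOptions: &redis.FTAggregateWithCursor{Count: 100},
		})
		// FT.CURSOR READ idx:docs <id> COUNT 100, then FT.CURSOR DEL when exhausted.
		if _, err := rdb.FTCursorRead(ctx, "idx:docs", cursorID, 100).Result(); err != nil {
			return err
		}
		return rdb.FTCursorDel(ctx, "idx:docs", cursorID).Err()
	}
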
+// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd { + cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainWithArgs - Returns the execution plan for a complex query with options. +// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd { + args := []interface{}{"FT.EXPLAIN", index, query} + if options.Dialect != "" { + args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainCli - Returns the execution plan for a complex query. [Not Implemented] +// For more information, see https://redis.io/commands/ft.explaincli/ +func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error { + return fmt.Errorf("FTExplainCli is not implemented") +} + +func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) { + var ftInfo FTInfoResult + // Manually parse each field from the map + if indexErrors, ok := data["Index Errors"].([]interface{}); ok { + ftInfo.IndexErrors = IndexErrors{ + IndexingFailures: internal.ToInteger(indexErrors[1]), + LastIndexingError: internal.ToString(indexErrors[3]), + LastIndexingErrorKey: internal.ToString(indexErrors[5]), + } + } + + if attributes, ok := data["attributes"].([]interface{}); ok { + for _, attr := range attributes { + if attrMap, ok := attr.([]interface{}); ok { + att := FTAttribute{} + attrLen := len(attrMap) + for i := 0; i < attrLen; i++ { + if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" && i+1 < attrLen { + att.Attribute = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" && i+1 < attrLen { + att.Identifier = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "type" && i+1 < attrLen { + att.Type = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "weight" && i+1 < attrLen { + att.Weight = internal.ToFloat(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" { + att.NoStem = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" { + att.Sortable = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" { + att.NoIndex = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "unf" { + att.UNF = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" && i+1 < attrLen { + att.PhoneticMatcher = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" { + att.CaseSensitive = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" { + att.WithSuffixtrie = true + continue + } + + // vector specific attributes + if internal.ToLower(internal.ToString(attrMap[i])) == "algorithm" && i+1 < attrLen { + 
att.Algorithm = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "data_type" && i+1 < attrLen { + att.DataType = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "dim" && i+1 < attrLen { + att.Dim = internal.ToInteger(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "distance_metric" && i+1 < attrLen { + att.DistanceMetric = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "m" && i+1 < attrLen { + att.M = internal.ToInteger(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "ef_construction" && i+1 < attrLen { + att.EFConstruction = internal.ToInteger(attrMap[i+1]) + i++ + continue + } + + } + ftInfo.Attributes = append(ftInfo.Attributes, att) + } + } + } + + ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"]) + ftInfo.Cleaning = internal.ToInteger(data["cleaning"]) + + if cursorStats, ok := data["cursor_stats"].([]interface{}); ok { + ftInfo.CursorStats = CursorStats{ + GlobalIdle: internal.ToInteger(cursorStats[1]), + GlobalTotal: internal.ToInteger(cursorStats[3]), + IndexCapacity: internal.ToInteger(cursorStats[5]), + IndexTotal: internal.ToInteger(cursorStats[7]), + } + } + + if dialectStats, ok := data["dialect_stats"].([]interface{}); ok { + ftInfo.DialectStats = make(map[string]int) + for i := 0; i < len(dialectStats); i += 2 { + ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1]) + } + } + + ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"]) + + if fieldStats, ok := data["field statistics"].([]interface{}); ok { + for _, stat := range fieldStats { + if statMap, ok := stat.([]interface{}); ok { + ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{ + Identifier: internal.ToString(statMap[1]), + Attribute: internal.ToString(statMap[3]), + IndexErrors: IndexErrors{ + IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]), + LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]), + LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]), + }, + }) + } + } + } + + if gcStats, ok := data["gc_stats"].([]interface{}); ok { + ftInfo.GCStats = GCStats{} + for i := 0; i < len(gcStats); i += 2 { + if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" { + ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" { + ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" { + ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" { + ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" { + ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" { + ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" { + ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1]) + continue + } + } 
+ } + + ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"]) + ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"]) + + if indexDef, ok := data["index_definition"].([]interface{}); ok { + ftInfo.IndexDefinition = IndexDefinition{ + KeyType: internal.ToString(indexDef[1]), + Prefixes: internal.ToStringSlice(indexDef[3]), + DefaultScore: internal.ToFloat(indexDef[5]), + } + } + + ftInfo.IndexName = internal.ToString(data["index_name"]) + ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{})) + ftInfo.Indexing = internal.ToInteger(data["indexing"]) + ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"]) + ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"]) + ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"]) + ftInfo.NumDocs = internal.ToInteger(data["num_docs"]) + ftInfo.NumRecords = internal.ToInteger(data["num_records"]) + ftInfo.NumTerms = internal.ToInteger(data["num_terms"]) + ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"]) + ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"]) + ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"]) + ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"]) + ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"]) + ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"]) + ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"]) + ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"]) + ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"]) + ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"]) + ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"]) + ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"]) + ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"]) + + return ftInfo, nil +} + +type FTInfoCmd struct { + baseCmd + val FTInfoResult +} + +func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd { + return &FTInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTInfoCmd) SetVal(val FTInfoResult) { + cmd.val = val +} + +func (cmd *FTInfoCmd) Result() (FTInfoResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTInfoCmd) Val() FTInfoResult { + return cmd.val +} + +func (cmd *FTInfoCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTInfoCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} +func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + data[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + data[k] = err + continue + } + return err + } + data[k] = v + } + cmd.val, err = parseFTInfo(data) + if err != nil { + return err + } + + return nil +} + +// FTInfo - Retrieves information about an index. +// The 'index' parameter specifies the index to retrieve information about. 
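FTInfoCmd above flattens the FT.INFO map reply into the typed FTInfoResult, so callers read struct fields instead of walking nested slices. A sketch against the FTInfo command defined just below, with an illustrative index name and the same imports as the earlier examples:

	func printIndexStats(ctx context.Context, rdb *redis.Client) error {
		info, err := rdb.FTInfo(ctx, "idx:docs").Result()
		if err != nil {
			return err
		}
		fmt.Printf("docs=%d records=%d terms=%d percent_indexed=%.2f\n",
			info.NumDocs, info.NumRecords, info.NumTerms, info.PercentIndexed)
		return nil
	}
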
+// For more information, please refer to the Redis documentation: +// [FT.INFO]: (https://redis.io/commands/ft.info/) +func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd { + cmd := newFTInfoCmd(ctx, "FT.INFO", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheck - Checks a query string for spelling errors. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + if options != nil { + if options.Distance > 0 { + args = append(args, "DISTANCE", options.Distance) + } + if options.Terms != nil { + args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary) + args = append(args, options.Terms.Terms...) + } + if options.Dialect > 0 { + args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) + } + } + cmd := newFTSpellCheckCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +type FTSpellCheckCmd struct { + baseCmd + val []SpellCheckResult +} + +func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd { + return &FTSpellCheckCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSpellCheckCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) { + cmd.val = val +} + +func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult { + return cmd.val +} + +func (cmd *FTSpellCheckCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSpellCheck(data) + if err != nil { + return err + } + return nil +} + +func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) { + results := make([]SpellCheckResult, 0, len(data)) + + for _, termData := range data { + termInfo, ok := termData.([]interface{}) + if !ok || len(termInfo) != 3 { + return nil, fmt.Errorf("invalid term format") + } + + term, ok := termInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid term format") + } + + suggestionsData, ok := termInfo[2].([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid suggestions format") + } + + suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData)) + for _, suggestionData := range suggestionsData { + suggestionInfo, ok := suggestionData.([]interface{}) + if !ok || len(suggestionInfo) != 2 { + return nil, fmt.Errorf("invalid suggestion format") + } + + scoreStr, ok := suggestionInfo[0].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion score format") + } + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return nil, fmt.Errorf("invalid suggestion score value") + } + + suggestion, ok := suggestionInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion format") + } + + suggestions = append(suggestions, SpellCheckSuggestion{ + Score: score, + Suggestion: suggestion, + }) + } + + results = append(results, SpellCheckResult{ + Term: term, + Suggestions: suggestions, + }) + } + + return results, nil +} + +func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) { + if len(data) < 1 { + return FTSearchResult{}, fmt.Errorf("unexpected search result format") + } + + total, ok := data[0].(int64) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid total results format") + } + + var results []Document + for i := 1; i < len(data); { + docID, ok := data[i].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document ID format") + } + + doc := Document{ + ID: docID, + Fields: make(map[string]string), + } + i++ + + if noContent { + results = append(results, doc) + continue + } + + if withScores && i < len(data) { + if scoreStr, ok := data[i].(string); ok { + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return FTSearchResult{}, fmt.Errorf("invalid score format") + } + doc.Score = &score + i++ + } + } + + if withPayloads && i < len(data) { + if payload, ok := data[i].(string); ok { + doc.Payload = &payload + i++ + } + } + + if withSortKeys && i < len(data) { + if sortKey, ok := data[i].(string); ok { + doc.SortKey = 
&sortKey + i++ + } + } + + if i < len(data) { + fields, ok := data[i].([]interface{}) + if !ok { + if data[i] == proto.Nil || data[i] == nil { + doc.Error = proto.Nil + doc.Fields = map[string]string{} + fields = []interface{}{} + } else { + return FTSearchResult{}, fmt.Errorf("invalid document fields format") + } + } + + for j := 0; j < len(fields); j += 2 { + key, ok := fields[j].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field key format") + } + value, ok := fields[j+1].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field value format") + } + doc.Fields[key] = value + } + i++ + } + + results = append(results, doc) + } + return FTSearchResult{ + Total: int(total), + Docs: results, + }, nil +} + +type FTSearchCmd struct { + baseCmd + val FTSearchResult + options *FTSearchOptions +} + +func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd { + return &FTSearchCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + options: options, + } +} + +func (cmd *FTSearchCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSearchCmd) SetVal(val FTSearchResult) { + cmd.val = val +} + +func (cmd *FTSearchCmd) Result() (FTSearchResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSearchCmd) Val() FTSearchResult { + return cmd.val +} + +func (cmd *FTSearchCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSearchCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys) + if err != nil { + return err + } + return nil +} + +// FTHybridResult represents the result of a hybrid search operation +type FTHybridResult struct { + TotalResults int + Results []map[string]interface{} + Warnings []string + ExecutionTime float64 +} + +// FTHybridCursorResult represents cursor result for hybrid search +type FTHybridCursorResult struct { + SearchCursorID int + VsimCursorID int +} + +type FTHybridCmd struct { + baseCmd + val FTHybridResult + cursorVal *FTHybridCursorResult + options *FTHybridOptions + withCursor bool +} + +func newFTHybridCmd(ctx context.Context, options *FTHybridOptions, args ...interface{}) *FTHybridCmd { + var withCursor bool + if options != nil && options.WithCursor { + withCursor = true + } + return &FTHybridCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + options: options, + withCursor: withCursor, + } +} + +func (cmd *FTHybridCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTHybridCmd) SetVal(val FTHybridResult) { + cmd.val = val +} + +func (cmd *FTHybridCmd) Result() (FTHybridResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTHybridCmd) CursorResult() (*FTHybridCursorResult, error) { + return cmd.cursorVal, cmd.err +} + +func (cmd *FTHybridCmd) Val() FTHybridResult { + return cmd.val +} + +func (cmd *FTHybridCmd) CursorVal() *FTHybridCursorResult { + return cmd.cursorVal +} + +func (cmd *FTHybridCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTHybridCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func parseFTHybrid(data []interface{}, withCursor bool) (FTHybridResult, *FTHybridCursorResult, error) { + // Convert to map + resultMap := make(map[string]interface{}) + for i := 0; i < len(data); i 
+= 2 { + if i+1 < len(data) { + key, ok := data[i].(string) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid key type at index %d", i) + } + resultMap[key] = data[i+1] + } + } + + // Handle cursor result + if withCursor { + searchCursorID, ok1 := resultMap["SEARCH"].(int64) + vsimCursorID, ok2 := resultMap["VSIM"].(int64) + if !ok1 || !ok2 { + return FTHybridResult{}, nil, fmt.Errorf("invalid cursor result format") + } + return FTHybridResult{}, &FTHybridCursorResult{ + SearchCursorID: int(searchCursorID), + VsimCursorID: int(vsimCursorID), + }, nil + } + + // Parse regular result + totalResults, ok := resultMap["total_results"].(int64) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid total_results format") + } + + resultsData, ok := resultMap["results"].([]interface{}) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid results format") + } + + // Parse each result item + results := make([]map[string]interface{}, 0, len(resultsData)) + for _, item := range resultsData { + // Try parsing as map[string]interface{} first (RESP3 format) + if itemMap, ok := item.(map[string]interface{}); ok { + results = append(results, itemMap) + continue + } + + // Try parsing as map[interface{}]interface{} (alternative RESP3 format) + if rawMap, ok := item.(map[interface{}]interface{}); ok { + itemMap := make(map[string]interface{}) + for k, v := range rawMap { + if keyStr, ok := k.(string); ok { + itemMap[keyStr] = v + } + } + results = append(results, itemMap) + continue + } + + // Fall back to array format (RESP2 format - key-value pairs) + itemData, ok := item.([]interface{}) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid result item format") + } + + itemMap := make(map[string]interface{}) + for i := 0; i < len(itemData); i += 2 { + if i+1 < len(itemData) { + key, ok := itemData[i].(string) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid item key format") + } + itemMap[key] = itemData[i+1] + } + } + results = append(results, itemMap) + } + + // Parse warnings (optional field) + var warnings []string + if warningsData, ok := resultMap["warnings"].([]interface{}); ok { + warnings = make([]string, 0, len(warningsData)) + for _, w := range warningsData { + if ws, ok := w.(string); ok { + warnings = append(warnings, ws) + } + } + } + + // Parse execution time (optional field) + var executionTime float64 + if execTimeVal, exists := resultMap["execution_time"]; exists { + switch v := execTimeVal.(type) { + case string: + var err error + executionTime, err = strconv.ParseFloat(v, 64) + if err != nil { + return FTHybridResult{}, nil, fmt.Errorf("invalid execution_time format: %v", err) + } + case float64: + executionTime = v + case int64: + executionTime = float64(v) + } + } + + return FTHybridResult{ + TotalResults: int(totalResults), + Results: results, + Warnings: warnings, + ExecutionTime: executionTime, + }, nil, nil +} + +func (cmd *FTHybridCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + + result, cursorResult, err := parseFTHybrid(data, cmd.withCursor) + if err != nil { + return err + } + + if cmd.withCursor { + cmd.cursorVal = cursorResult + } else { + cmd.val = result + } + return nil +} + +// FTSearch - Executes a search query on an index. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. 
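parseFTSearch above reassembles the flat RESP array into FTSearchResult, with the optional score/payload/sortkey slots governed by the search flags. Basic usage of the FTSearch command that follows (index, query, and field names are illustrative):

	func searchTitles(ctx context.Context, rdb *redis.Client) error {
		res, err := rdb.FTSearch(ctx, "idx:docs", "@title:redis").Result()
		if err != nil {
			return err
		}
		fmt.Println("total:", res.Total)
		for _, doc := range res.Docs {
			fmt.Println(doc.ID, doc.Fields["title"])
		}
		return nil
	}
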
+// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...) + _ = c(ctx, cmd) + return cmd +} + +type SearchQuery []interface{} + +// FTSearchQuery - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. +// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func FTSearchQuery(query string, options *FTSearchOptions) (SearchQuery, error) { + queryArgs := []interface{}{query} + if options != nil { + if options.NoContent { + queryArgs = append(queryArgs, "NOCONTENT") + } + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + if options.NoStopWords { + queryArgs = append(queryArgs, "NOSTOPWORDS") + } + if options.WithScores { + queryArgs = append(queryArgs, "WITHSCORES") + } + if options.WithPayloads { + queryArgs = append(queryArgs, "WITHPAYLOADS") + } + if options.WithSortKeys { + queryArgs = append(queryArgs, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + queryArgs = append(queryArgs, "INKEYS", len(options.InKeys)) + queryArgs = append(queryArgs, options.InKeys...) + } + if options.InFields != nil { + queryArgs = append(queryArgs, "INFIELDS", len(options.InFields)) + queryArgs = append(queryArgs, options.InFields...) + } + if options.Return != nil { + queryArgs = append(queryArgs, "RETURN") + queryArgsReturn := []interface{}{} + for _, ret := range options.Return { + queryArgsReturn = append(queryArgsReturn, ret.FieldName) + if ret.As != "" { + queryArgsReturn = append(queryArgsReturn, "AS", ret.As) + } + } + queryArgs = append(queryArgs, len(queryArgsReturn)) + queryArgs = append(queryArgs, queryArgsReturn...) 
+ } + if options.Slop > 0 { + queryArgs = append(queryArgs, "SLOP", options.Slop) + } + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + if options.InOrder { + queryArgs = append(queryArgs, "INORDER") + } + if options.Language != "" { + queryArgs = append(queryArgs, "LANGUAGE", options.Language) + } + if options.Expander != "" { + queryArgs = append(queryArgs, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + if options.ExplainScore { + queryArgs = append(queryArgs, "EXPLAINSCORE") + } + if options.Payload != "" { + queryArgs = append(queryArgs, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + for _, sortBy := range options.SortBy { + queryArgs = append(queryArgs, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + return nil, fmt.Errorf("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + queryArgs = append(queryArgs, "ASC") + } + if sortBy.Desc { + queryArgs = append(queryArgs, "DESC") + } + } + if options.SortByWithCount { + queryArgs = append(queryArgs, "WITHCOUNT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) + } + } + return queryArgs, nil +} + +// FTSearchWithArgs - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. +// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + if options != nil { + if options.NoContent { + args = append(args, "NOCONTENT") + } + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.NoStopWords { + args = append(args, "NOSTOPWORDS") + } + if options.WithScores { + args = append(args, "WITHSCORES") + } + if options.WithPayloads { + args = append(args, "WITHPAYLOADS") + } + if options.WithSortKeys { + args = append(args, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + args = append(args, "INKEYS", len(options.InKeys)) + args = append(args, options.InKeys...) + } + if options.InFields != nil { + args = append(args, "INFIELDS", len(options.InFields)) + args = append(args, options.InFields...) 
+ } + if options.Return != nil { + args = append(args, "RETURN") + argsReturn := []interface{}{} + for _, ret := range options.Return { + argsReturn = append(argsReturn, ret.FieldName) + if ret.As != "" { + argsReturn = append(argsReturn, "AS", ret.As) + } + } + args = append(args, len(argsReturn)) + args = append(args, argsReturn...) + } + if options.Slop > 0 { + args = append(args, "SLOP", options.Slop) + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + if options.InOrder { + args = append(args, "INORDER") + } + if options.Language != "" { + args = append(args, "LANGUAGE", options.Language) + } + if options.Expander != "" { + args = append(args, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + if options.Payload != "" { + args = append(args, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + args = append(args, "SORTBY") + for _, sortBy := range options.SortBy { + args = append(args, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := newFTSearchCmd(ctx, options, args...) + cmd.SetErr(fmt.Errorf("FT.SEARCH: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + args = append(args, "ASC") + } + if sortBy.Desc { + args = append(args, "DESC") + } + } + if options.SortByWithCount { + args = append(args, "WITHCOUNT") + } + } + if options.CountOnly { + args = append(args, "LIMIT", 0, 0) + } else { + if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) + } + } + cmd := newFTSearchCmd(ctx, options, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd { + return &FTSynDumpCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSynDumpCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) { + cmd.val = val +} + +func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult { + return cmd.val +} + +func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSynDumpCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error { + termSynonymPairs, err := rd.ReadSlice() + if err != nil { + return err + } + + var results []FTSynDumpResult + for i := 0; i < len(termSynonymPairs); i += 2 { + term, ok := termSynonymPairs[i].(string) + if !ok { + return fmt.Errorf("invalid term format") + } + + synonyms, ok := termSynonymPairs[i+1].([]interface{}) + if !ok { + return fmt.Errorf("invalid synonyms format") + } + + synonymList := make([]string, len(synonyms)) + for j, syn := range synonyms { + synonym, ok := syn.(string) + if !ok { + return fmt.Errorf("invalid synonym format") + } + synonymList[j] = synonym + } + + results = append(results, FTSynDumpResult{ + Term: term, + Synonyms: synonymList, + }) + } + + cmd.val = results + return nil +} + +// FTSynDump - Dumps the contents of a synonym group. +// The 'index' parameter specifies the index to dump. +// For more information, please refer to the Redis documentation: +// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/) +func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd { + cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdate - Creates or updates a synonym group with additional terms. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTTagVals - Returns all distinct values indexed in a tag field. 
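FTSynUpdate and FTSynDump round-trip synonym groups: update writes terms into a group id, and dump returns term-to-synonyms pairs parsed into FTSynDumpResult above. A sketch with illustrative terms and group id:

	func syncSynonyms(ctx context.Context, rdb *redis.Client) error {
		if err := rdb.FTSynUpdate(ctx, "idx:docs", "grp:vehicles",
			[]interface{}{"car", "automobile"}).Err(); err != nil {
			return err
		}
		dump, err := rdb.FTSynDump(ctx, "idx:docs").Result()
		if err != nil {
			return err
		}
		for _, g := range dump {
			fmt.Println(g.Term, "->", g.Synonyms)
		}
		return nil
	}
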
+// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from. +// For more information, please refer to the Redis documentation: +// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/) +func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field) + _ = c(ctx, cmd) + return cmd +} + +// FTHybrid - Executes a hybrid search combining full-text search and vector similarity +// The 'index' parameter specifies the index to search, 'searchExpr' is the search query, +// 'vectorField' is the name of the vector field, and 'vectorData' is the vector to search with. +// FTHybrid is still experimental, the command behaviour and signature may change +func (c cmdable) FTHybrid(ctx context.Context, index string, searchExpr string, vectorField string, vectorData Vector) *FTHybridCmd { + options := &FTHybridOptions{ + CountExpressions: 2, + SearchExpressions: []FTHybridSearchExpression{ + {Query: searchExpr}, + }, + VectorExpressions: []FTHybridVectorExpression{ + {VectorField: vectorField, VectorData: vectorData}, + }, + } + return c.FTHybridWithArgs(ctx, index, options) +} + +// FTHybridWithArgs - Executes a hybrid search with advanced options +// FTHybridWithArgs is still experimental, the command behaviour and signature may change +func (c cmdable) FTHybridWithArgs(ctx context.Context, index string, options *FTHybridOptions) *FTHybridCmd { + args := []interface{}{"FT.HYBRID", index} + + if options != nil { + // Add search expressions + for _, searchExpr := range options.SearchExpressions { + args = append(args, "SEARCH", searchExpr.Query) + + if searchExpr.Scorer != "" { + args = append(args, "SCORER", searchExpr.Scorer) + if len(searchExpr.ScorerParams) > 0 { + args = append(args, searchExpr.ScorerParams...) + } + } + + if searchExpr.YieldScoreAs != "" { + args = append(args, "YIELD_SCORE_AS", searchExpr.YieldScoreAs) + } + } + + // Add vector expressions + for _, vectorExpr := range options.VectorExpressions { + args = append(args, "VSIM", "@"+vectorExpr.VectorField) + + // For FT.HYBRID, we need to send just the raw vector bytes, not the Value() format + // Value() returns [format, data] but FT.HYBRID expects just the blob + vectorValue := vectorExpr.VectorData.Value() + if len(vectorValue) >= 2 { + // vectorValue is [format, data, ...] - we only want the data part + args = append(args, vectorValue[1]) + } else { + // Fallback for unexpected format + args = append(args, vectorValue...) + } + + if vectorExpr.Method != "" { + args = append(args, vectorExpr.Method) + if len(vectorExpr.MethodParams) > 0 { + // MethodParams should be key-value pairs, count them + args = append(args, len(vectorExpr.MethodParams)) + args = append(args, vectorExpr.MethodParams...) 
+ } + } + + if vectorExpr.Filter != "" { + args = append(args, "FILTER", vectorExpr.Filter) + } + + if vectorExpr.YieldScoreAs != "" { + args = append(args, "YIELD_SCORE_AS", vectorExpr.YieldScoreAs) + } + } + + // Add combine/fusion options + if options.Combine != nil { + // Build combine parameters + combineParams := []interface{}{} + + switch options.Combine.Method { + case FTHybridCombineRRF: + if options.Combine.Window > 0 { + combineParams = append(combineParams, "WINDOW", options.Combine.Window) + } + if options.Combine.Constant > 0 { + combineParams = append(combineParams, "CONSTANT", options.Combine.Constant) + } + case FTHybridCombineLinear: + if options.Combine.Alpha > 0 { + combineParams = append(combineParams, "ALPHA", options.Combine.Alpha) + } + if options.Combine.Beta > 0 { + combineParams = append(combineParams, "BETA", options.Combine.Beta) + } + } + + if options.Combine.YieldScoreAs != "" { + combineParams = append(combineParams, "YIELD_SCORE_AS", options.Combine.YieldScoreAs) + } + + // Add COMBINE with method and parameter count + args = append(args, "COMBINE", string(options.Combine.Method)) + if len(combineParams) > 0 { + args = append(args, len(combineParams)) + args = append(args, combineParams...) + } + } + + // Add LOAD (projected fields) + if len(options.Load) > 0 { + args = append(args, "LOAD", len(options.Load)) + for _, field := range options.Load { + args = append(args, field) + } + } + + // Add GROUPBY + if options.GroupBy != nil { + args = append(args, "GROUPBY", options.GroupBy.Count) + for _, field := range options.GroupBy.Fields { + args = append(args, field) + } + if options.GroupBy.ReduceFunc != "" { + args = append(args, "REDUCE", options.GroupBy.ReduceFunc, options.GroupBy.ReduceCount) + args = append(args, options.GroupBy.ReduceParams...) + } + } + + // Add APPLY transformations + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Expression, "AS", apply.AsField) + } + + // Add SORTBY + if len(options.SortBy) > 0 { + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := newFTHybridCmd(ctx, options, args...) + cmd.SetErr(fmt.Errorf("FT.HYBRID: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, "SORTBY", len(sortByOptions)) + args = append(args, sortByOptions...) 
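+			// e.g. a single SortBy entry {FieldName: "price", Desc: true} is emitted as: SORTBY 2 price DESC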
+ } + + // Add FILTER (post-filter) + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + + // Add LIMIT + if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + + // Add PARAMS + if len(options.Params) > 0 { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + // Parameter keys should already have '$' prefix from the user + // Don't add it again if it's already there + args = append(args, key, value) + } + } + + // Add EXPLAINSCORE + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + + // Add TIMEOUT + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + + // Add WITHCURSOR support + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + } + + cmd := newFTHybridCmd(ctx, options, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go index 39bf1b424..663f7b1ad 100644 --- a/vendor/github.com/redis/go-redis/v9/sentinel.go +++ b/vendor/github.com/redis/go-redis/v9/sentinel.go @@ -4,14 +4,21 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" + "net/url" + "strconv" "strings" "sync" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/rand" + "github.com/redis/go-redis/v9/internal/util" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) //------------------------------------------------------------------------------ @@ -57,7 +64,26 @@ type FailoverOptions struct { Protocol int Username string Password string - DB int + + // Push notifications are always enabled for RESP3 connections + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. + CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + DB int MaxRetries int MinRetryBackoff time.Duration @@ -68,6 +94,20 @@ type FailoverOptions struct { WriteTimeout time.Duration ContextTimeoutEnabled bool + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. 
+ // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + PoolFIFO bool PoolSize int @@ -93,6 +133,22 @@ type FailoverOptions struct { DisableIdentity bool IdentitySuffix string + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Only applies to failover cluster clients. Default is 15 seconds. + FailingTimeoutSeconds int + + UnstableResp3 bool + + // MaintNotificationsConfig is not supported for FailoverClients at the moment + // MaintNotificationsConfig provides custom configuration for maintnotifications upgrades. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // upgrade notifications gracefully and manage connection/pool state transitions + // seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications upgrades are disabled. + // (however if Mode is nil, it defaults to "auto" - enable if server supports it) + //MaintNotificationsConfig *maintnotifications.Config } func (opt *FailoverOptions) clientOptions() *Options { @@ -103,15 +159,21 @@ func (opt *FailoverOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - DB: opt.DB, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, + DB: opt.DB, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + DialTimeout: opt.DialTimeout, ReadTimeout: opt.ReadTimeout, WriteTimeout: opt.WriteTimeout, @@ -130,7 +192,13 @@ func (opt *FailoverOptions) clientOptions() *Options { DisableIdentity: opt.DisableIdentity, DisableIndentity: opt.DisableIndentity, - IdentitySuffix: opt.IdentitySuffix, + + IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, + + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeDisabled, + }, } } @@ -150,6 +218,10 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options { MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, + // The sentinel client uses a 4KiB read/write buffer size. 
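+	// (smaller than the 32KiB default used for regular connections)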
+		ReadBufferSize:  4096,
+		WriteBufferSize: 4096,
+
 		DialTimeout:  opt.DialTimeout,
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
@@ -168,7 +240,13 @@
 		DisableIdentity:  opt.DisableIdentity,
 		DisableIndentity: opt.DisableIndentity,
-		IdentitySuffix:   opt.IdentitySuffix,
+
+		IdentitySuffix: opt.IdentitySuffix,
+		UnstableResp3:  opt.UnstableResp3,
+
+		MaintNotificationsConfig: &maintnotifications.Config{
+			Mode: maintnotifications.ModeDisabled,
+		},
 	}
 }
@@ -179,18 +257,25 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
 		Dialer:    opt.Dialer,
 		OnConnect: opt.OnConnect,
-		Protocol: opt.Protocol,
-		Username: opt.Username,
-		Password: opt.Password,
+		Protocol:                     opt.Protocol,
+		Username:                     opt.Username,
+		Password:                     opt.Password,
+		CredentialsProvider:          opt.CredentialsProvider,
+		CredentialsProviderContext:   opt.CredentialsProviderContext,
+		StreamingCredentialsProvider: opt.StreamingCredentialsProvider,
 		MaxRedirects: opt.MaxRetries,
+		ReadOnly:       opt.ReplicaOnly,
 		RouteByLatency: opt.RouteByLatency,
 		RouteRandomly:  opt.RouteRandomly,
 		MinRetryBackoff: opt.MinRetryBackoff,
 		MaxRetryBackoff: opt.MaxRetryBackoff,
+		ReadBufferSize:  opt.ReadBufferSize,
+		WriteBufferSize: opt.WriteBufferSize,
+
 		DialTimeout:  opt.DialTimeout,
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
@@ -207,16 +292,170 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
 		TLSConfig: opt.TLSConfig,
-		DisableIdentity:  opt.DisableIdentity,
-		DisableIndentity: opt.DisableIndentity,
-		IdentitySuffix:   opt.IdentitySuffix,
+		DisableIdentity:       opt.DisableIdentity,
+		DisableIndentity:      opt.DisableIndentity,
+		IdentitySuffix:        opt.IdentitySuffix,
+		FailingTimeoutSeconds: opt.FailingTimeoutSeconds,
+
+		MaintNotificationsConfig: &maintnotifications.Config{
+			Mode: maintnotifications.ModeDisabled,
+		},
+	}
+}
+
+// ParseFailoverURL parses a URL into FailoverOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+//	redis://<user>:<password>@<host>:<port>/<db_number>
+//	or
+//	rediss://<user>:<password>@<host>:<port>/<db_number>
+//
+// To add additional addresses, specify the query parameter, "addr" one or more times. e.g:
+//
+//	redis://<user>:<password>@<host>:<port>/<db_number>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//	or
+//	rediss://<user>:<password>@<host>:<port>/<db_number>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e.
without unit) is interpreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "sentinel_username" and "sentinel_password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query parameters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// - use "skip_verify=true" to ignore TLS certificate validation +// +// Example: +// +// redis://user:password@localhost:6789?master_name=mymaster&dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 +// is equivalent to: +// &FailoverOptions{ +// MasterName: "mymaster", +// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"] +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// } +func ParseFailoverURL(redisURL string) (*FailoverOptions, error) { + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + return setupFailoverConn(u) +} + +func setupFailoverConn(u *url.URL) (*FailoverOptions, error) { + o := &FailoverOptions{} + + o.SentinelUsername, o.SentinelPassword = getUserPassword(u) + + h, p := getHostPortWithDefaults(u) + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: h, MinVersion: tls.VersionTLS12} + case "redis": + o.TLSConfig = nil + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + f := strings.FieldsFunc(u.Path, func(r rune) bool { + return r == '/' + }) + switch len(f) { + case 0: + o.DB = 0 + case 1: + var err error + if o.DB, err = strconv.Atoi(f[0]); err != nil { + return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) + } + default: + return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) } + + return setupFailoverConnParams(u, o) +} + +func setupFailoverConnParams(u *url.URL, o *FailoverOptions) (*FailoverOptions, error) { + q := queryOptions{q: u.Query()} + + o.MasterName = q.string("master_name") + o.ClientName = q.string("client_name") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.ReplicaOnly = q.bool("replica_only") + o.UseDisconnectedReplicas = q.bool("use_disconnected_replicas") + o.Protocol = q.int("protocol") + o.Username = q.string("username") + o.Password = q.string("password") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.ContextTimeoutEnabled = q.bool("context_timeout_enabled") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MinIdleConns = q.int("min_idle_conns") + o.MaxIdleConns = q.int("max_idle_conns") + o.MaxActiveConns = q.int("max_active_conns") + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + o.PoolTimeout = q.duration("pool_timeout") + o.DisableIdentity = q.bool("disableIdentity") + o.IdentitySuffix = q.string("identitySuffix") + o.UnstableResp3 = q.bool("unstable_resp3") + + if q.err != nil { + return nil, q.err + } + + if tmp := q.string("db"); tmp != "" { + db, err := strconv.Atoi(tmp) + if err != 
nil { + return nil, fmt.Errorf("redis: invalid database number: %w", err) + } + o.DB = db + } + + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + } + + if o.TLSConfig != nil && q.has("skip_verify") { + o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") + } + + // any parameters left? + if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil } // NewFailoverClient returns a Redis client that uses Redis Sentinel // for automatic failover. It's safe for concurrent use by multiple // goroutines. func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + if failoverOpt == nil { + panic("redis: NewFailoverClient nil options") + } + if failoverOpt.RouteByLatency { panic("to route commands by latency, use NewFailoverClusterClient") } @@ -240,8 +479,6 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { opt.Dialer = masterReplicaDialer(failover) opt.init() - var connPool *pool.ConnPool - rdb := &Client{ baseClient: &baseClient{ opt: opt, @@ -249,15 +486,29 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { } rdb.init() - connPool = newConnPool(opt, rdb.dialHook) - rdb.connPool = connPool - rdb.onClose = failover.Close + // Initialize push notification processor using shared helper + // Use void processor by default for RESP2 connections + rdb.pushProcessor = initializePushProcessor(opt) + + var err error + rdb.connPool, err = newConnPool(opt, rdb.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + rdb.pubSubPool, err = newPubSubPool(opt, rdb.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } + + rdb.onClose = rdb.wrappedOnClose(failover.Close) failover.mu.Lock() failover.onFailover = func(ctx context.Context, addr string) { - _ = connPool.Filter(func(cn *pool.Conn) bool { - return cn.RemoteAddr().String() != addr - }) + if connPool, ok := rdb.connPool.(*pool.ConnPool); ok { + _ = connPool.Filter(func(cn *pool.Conn) bool { + return cn.RemoteAddr().String() != addr + }) + } } failover.mu.Unlock() @@ -302,10 +553,12 @@ func masterReplicaDialer( // SentinelClient is a client for a Redis Sentinel. type SentinelClient struct { *baseClient - hooksMixin } func NewSentinelClient(opt *Options) *SentinelClient { + if opt == nil { + panic("redis: NewSentinelClient nil options") + } opt.init() c := &SentinelClient{ baseClient: &baseClient{ @@ -313,15 +566,40 @@ func NewSentinelClient(opt *Options) *SentinelClient { }, } + // Initialize push notification processor using shared helper + // Use void processor for Sentinel clients + c.pushProcessor = NewVoidPushNotificationProcessor() + c.initHooks(hooks{ dial: c.baseClient.dial, process: c.baseClient.process, }) - c.connPool = newConnPool(opt, c.dialHook) + var err error + c.connPool, err = newConnPool(opt, c.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + c.pubSubPool, err = newPubSubPool(opt, c.dialHook) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } return c } +// GetPushNotificationHandler returns the handler for a specific push notification name. +// Returns nil if no handler is registered for the given name. 
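+//
+// For example (notification name illustrative):
+//
+//	if h := client.GetPushNotificationHandler("MOVING"); h == nil {
+//		// nothing is registered for this notification type
+//	}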
+func (c *SentinelClient) GetPushNotificationHandler(pushNotificationName string) push.NotificationHandler { + return c.pushProcessor.GetHandler(pushNotificationName) +} + +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. +func (c *SentinelClient) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -331,13 +609,31 @@ func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { func (c *SentinelClient) pubSub() *PubSub { pubsub := &PubSub{ opt: c.opt, - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - return c.newConn(ctx) + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { + cn, err := c.pubSubPool.NewConn(ctx, c.opt.Network, addr, channels) + if err != nil { + return nil, err + } + // will return nil if already initialized + err = c.initConn(ctx, cn) + if err != nil { + _ = cn.Close() + return nil, err + } + // Track connection in PubSubPool + c.pubSubPool.TrackConn(cn) + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + // Untrack connection from PubSubPool + c.pubSubPool.UntrackConn(cn) + _ = cn.Close() + return nil }, - closeConn: c.connPool.CloseConn, + pushProcessor: c.pushProcessor, } pubsub.init() + return pubsub } @@ -472,10 +768,10 @@ type sentinelFailover struct { onFailover func(ctx context.Context, addr string) onUpdate func(ctx context.Context) - mu sync.RWMutex - _masterAddr string - sentinel *SentinelClient - pubsub *PubSub + mu sync.RWMutex + masterAddr string + sentinel *SentinelClient + pubsub *PubSub } func (c *sentinelFailover) Close() error { @@ -560,29 +856,70 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { } } - for i, sentinelAddr := range c.sentinelAddrs { - sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) + // short circuit if no sentinels configured + if len(c.sentinelAddrs) == 0 { + return "", errors.New("redis: no sentinels configured") + } - masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() - if err != nil { - _ = sentinel.Close() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return "", err - } - internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s", - c.opt.MasterName, err) - continue - } + var ( + masterAddr string + wg sync.WaitGroup + once sync.Once + errCh = make(chan error, len(c.sentinelAddrs)) + ) - // Push working sentinel to the top. 
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] - c.setSentinel(ctx, sentinel) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) - return addr, nil + for i, sentinelAddr := range c.sentinelAddrs { + wg.Add(1) + go func(i int, addr string) { + defer wg.Done() + sentinelCli := NewSentinelClient(c.opt.sentinelOptions(addr)) + addrVal, err := sentinelCli.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName addr=%s, master=%q failed: %s", + addr, c.opt.MasterName, err) + _ = sentinelCli.Close() + errCh <- err + return + } + once.Do(func() { + masterAddr = net.JoinHostPort(addrVal[0], addrVal[1]) + // Push working sentinel to the top + c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinelCli) + internal.Logger.Printf(ctx, "sentinel: selected addr=%s masterAddr=%s", addr, masterAddr) + cancel() + }) + }(i, sentinelAddr) + } + + wg.Wait() + close(errCh) + if masterAddr != "" { + return masterAddr, nil + } + errs := make([]error, 0, len(errCh)) + for err := range errCh { + errs = append(errs, err) } + return "", fmt.Errorf("redis: all sentinels specified in configuration are unreachable: %s", joinErrors(errs)) +} - return "", errors.New("redis: all sentinels specified in configuration are unreachable") +func joinErrors(errs []error) string { + if len(errs) == 0 { + return "" + } + if len(errs) == 1 { + return errs[0].Error() + } + b := []byte(errs[0].Error()) + for _, err := range errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + return util.BytesToString(b) } func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) { @@ -702,7 +1039,7 @@ func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []strin func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { c.mu.RLock() - currentAddr := c._masterAddr //nolint:ifshort + currentAddr := c.masterAddr //nolint:ifshort c.mu.RUnlock() if addr == currentAddr { @@ -712,10 +1049,10 @@ func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { c.mu.Lock() defer c.mu.Unlock() - if addr == c._masterAddr { + if addr == c.masterAddr { return } - c._masterAddr = addr + c.masterAddr = addr internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q", c.opt.MasterName, addr) @@ -800,6 +1137,10 @@ func contains(slice []string, str string) bool { // NewFailoverClusterClient returns a client that supports routing read-only commands // to a replica node. 
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { + if failoverOpt == nil { + panic("redis: NewFailoverClusterClient nil options") + } + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) copy(sentinelAddrs, failoverOpt.SentinelAddrs) @@ -809,6 +1150,22 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { } opt := failoverOpt.clusterOptions() + if failoverOpt.DB != 0 { + onConnect := opt.OnConnect + + opt.OnConnect = func(ctx context.Context, cn *Conn) error { + if err := cn.Select(ctx, failoverOpt.DB).Err(); err != nil { + return err + } + + if onConnect != nil { + return onConnect(ctx, cn) + } + + return nil + } + } + opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) { masterAddr, err := failover.MasterAddr(ctx) if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/set_commands.go b/vendor/github.com/redis/go-redis/v9/set_commands.go index cef8ad6d8..79efa6e40 100644 --- a/vendor/github.com/redis/go-redis/v9/set_commands.go +++ b/vendor/github.com/redis/go-redis/v9/set_commands.go @@ -1,6 +1,10 @@ package redis -import "context" +import ( + "context" + + "github.com/redis/go-redis/v9/internal/hashtag" +) type SetCmdable interface { SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd @@ -78,16 +82,15 @@ func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { } func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { - args := make([]interface{}, 4+len(keys)) + numKeys := len(keys) + args := make([]interface{}, 4+numKeys) args[0] = "sintercard" - numkeys := int64(0) + args[1] = numKeys for i, key := range keys { args[2+i] = key - numkeys++ } - args[1] = numkeys - args[2+numkeys] = "limit" - args[3+numkeys] = limit + args[2+numKeys] = "limit" + args[3+numKeys] = limit cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -212,6 +215,9 @@ func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go index 670140270..7827babc8 100644 --- a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go +++ b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go @@ -2,8 +2,11 @@ package redis import ( "context" + "errors" "strings" "time" + + "github.com/redis/go-redis/v9/internal/hashtag" ) type SortedSetCmdable interface { @@ -257,16 +260,15 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd } func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { - args := make([]interface{}, 4+len(keys)) + numKeys := len(keys) + args := make([]interface{}, 4+numKeys) args[0] = "zintercard" - numkeys := int64(0) + args[1] = numKeys for i, key := range keys { args[2+i] = key - numkeys++ } - args[1] = numkeys - args[2+numkeys] = "limit" - args[3+numkeys] = limit + args[2+numKeys] = "limit" + args[3+numKeys] = limit cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -312,7 +314,9 @@ func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSlic case 1: args = append(args, count[0]) default: - panic("too many arguments") + cmd := NewZSliceCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewZSliceCmd(ctx, args...) 
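The panic-to-error change above is caller-visible; a minimal sketch, assuming a connected client rdb and a context ctx:

	// More than one count used to panic inside the SDK; it now surfaces
	// as an ordinary command error on the returned ZSliceCmd.
	if err := rdb.ZPopMax(ctx, "leaderboard", 1, 2).Err(); err != nil {
		// err: "too many arguments"
	}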
@@ -332,7 +336,9 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic case 1: args = append(args, count[0]) default: - panic("too many arguments") + cmd := NewZSliceCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewZSliceCmd(ctx, args...) @@ -720,6 +726,9 @@ func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go index 6d7b22922..5573e48b9 100644 --- a/vendor/github.com/redis/go-redis/v9/stream_commands.go +++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go @@ -7,7 +7,9 @@ import ( type StreamCmdable interface { XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XAckDel(ctx context.Context, stream string, group string, mode string, ids ...string) *SliceCmd XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XDelEx(ctx context.Context, stream string, mode string, ids ...string) *SliceCmd XLen(ctx context.Context, stream string) *IntCmd XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd @@ -31,8 +33,12 @@ type StreamCmdable interface { XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMaxLenMode(ctx context.Context, key string, maxLen int64, mode string) *IntCmd + XTrimMaxLenApproxMode(ctx context.Context, key string, maxLen, limit int64, mode string) *IntCmd XTrimMinID(ctx context.Context, key string, minID string) *IntCmd XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XTrimMinIDMode(ctx context.Context, key string, minID string, mode string) *IntCmd + XTrimMinIDApproxMode(ctx context.Context, key string, minID string, limit int64, mode string) *IntCmd XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd XInfoStream(ctx context.Context, key string) *XInfoStreamCmd XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd @@ -54,6 +60,7 @@ type XAddArgs struct { // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). Approx bool Limit int64 + Mode string ID string Values interface{} } @@ -81,6 +88,11 @@ func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { if a.Limit > 0 { args = append(args, "limit", a.Limit) } + + if a.Mode != "" { + args = append(args, a.Mode) + } + if a.ID != "" { args = append(args, a.ID) } else { @@ -93,6 +105,16 @@ func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { return cmd } +func (c cmdable) XAckDel(ctx context.Context, stream string, group string, mode string, ids ...string) *SliceCmd { + args := []interface{}{"xackdel", stream, group, mode, "ids", len(ids)} + for _, id := range ids { + args = append(args, id) + } + cmd := NewSliceCmd(ctx, args...) 
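+	// wire format: XACKDEL <stream> <group> <mode> IDS <numids> <id ...>
+	// (mode is e.g. KEEPREF, DELREF or ACKED; see the Redis XACKDEL docs)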
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { args := []interface{}{"xdel", stream} for _, id := range ids { @@ -103,6 +125,16 @@ func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd return cmd } +func (c cmdable) XDelEx(ctx context.Context, stream string, mode string, ids ...string) *SliceCmd { + args := []interface{}{"xdelex", stream, mode, "ids", len(ids)} + for _, id := range ids { + args = append(args, id) + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { cmd := NewIntCmd(ctx, "xlen", stream) _ = c(ctx, cmd) @@ -231,6 +263,7 @@ type XReadGroupArgs struct { Count int64 Block time.Duration NoAck bool + Claim time.Duration // Claim idle pending entries older than this duration } func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { @@ -250,6 +283,10 @@ func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSlic args = append(args, "noack") keyPos++ } + if a.Claim > 0 { + args = append(args, "claim", int64(a.Claim/time.Millisecond)) + keyPos += 2 + } args = append(args, "streams") keyPos++ for _, s := range a.Streams { @@ -375,6 +412,8 @@ func xClaimArgs(a *XClaimArgs) []interface{} { return args } +// TODO: refactor xTrim, xTrimMode and the wrappers over the functions + // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: // @@ -418,6 +457,42 @@ func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, return c.xTrim(ctx, key, "minid", true, minID, limit) } +func (c cmdable) xTrimMode( + ctx context.Context, key, strategy string, + approx bool, threshold interface{}, limit int64, + mode string, +) *IntCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xtrim", key, strategy) + if approx { + args = append(args, "~") + } + args = append(args, threshold) + if limit > 0 { + args = append(args, "limit", limit) + } + args = append(args, mode) + cmd := NewIntCmd(ctx, args...) 
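+	// e.g. xTrimMode(ctx, "s", "maxlen", true, 1000, 0, "KEEPREF")
+	// produces: XTRIM s MAXLEN ~ 1000 KEEPREF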
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XTrimMaxLenMode(ctx context.Context, key string, maxLen int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "maxlen", false, maxLen, 0, mode) +} + +func (c cmdable) XTrimMaxLenApproxMode(ctx context.Context, key string, maxLen, limit int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "maxlen", true, maxLen, limit, mode) +} + +func (c cmdable) XTrimMinIDMode(ctx context.Context, key string, minID string, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "minid", false, minID, 0, mode) +} + +func (c cmdable) XTrimMinIDApproxMode(ctx context.Context, key string, minID string, limit int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "minid", true, minID, limit, mode) +} + func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { cmd := NewXInfoConsumersCmd(ctx, key, group) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/string_commands.go b/vendor/github.com/redis/go-redis/v9/string_commands.go index eff5880dc..f3c33f4cb 100644 --- a/vendor/github.com/redis/go-redis/v9/string_commands.go +++ b/vendor/github.com/redis/go-redis/v9/string_commands.go @@ -2,6 +2,7 @@ package redis import ( "context" + "fmt" "time" ) @@ -9,6 +10,8 @@ type StringCmdable interface { Append(ctx context.Context, key, value string) *IntCmd Decr(ctx context.Context, key string) *IntCmd DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + DelExArgs(ctx context.Context, key string, a DelExArgs) *IntCmd + Digest(ctx context.Context, key string) *DigestCmd Get(ctx context.Context, key string) *StringCmd GetRange(ctx context.Context, key string, start, end int64) *StringCmd GetSet(ctx context.Context, key string, value interface{}) *StringCmd @@ -21,9 +24,18 @@ type StringCmdable interface { MGet(ctx context.Context, keys ...string) *SliceCmd MSet(ctx context.Context, values ...interface{}) *StatusCmd MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + MSetEX(ctx context.Context, args MSetEXArgs, values ...interface{}) *IntCmd Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetIFEQ(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd + SetIFEQGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd + SetIFNE(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd + SetIFNEGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd + SetIFDEQ(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd + SetIFDEQGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd + SetIFDNE(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd + SetIFDNEGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetRange(ctx 
context.Context, key string, offset int64, value string) *IntCmd @@ -48,6 +60,76 @@ func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCm return cmd } +// DelExArgs provides arguments for the DelExArgs function. +type DelExArgs struct { + // Mode can be `IFEQ`, `IFNE`, `IFDEQ`, or `IFDNE`. + Mode string + + // MatchValue is used with IFEQ/IFNE modes for compare-and-delete operations. + // - IFEQ: only delete if current value equals MatchValue + // - IFNE: only delete if current value does not equal MatchValue + MatchValue interface{} + + // MatchDigest is used with IFDEQ/IFDNE modes for digest-based compare-and-delete. + // - IFDEQ: only delete if current value's digest equals MatchDigest + // - IFDNE: only delete if current value's digest does not equal MatchDigest + // + // The digest is a uint64 xxh3 hash value. + // + // For examples of client-side digest generation, see: + // example/digest-optimistic-locking/ + MatchDigest uint64 +} + +// DelExArgs Redis `DELEX key [IFEQ|IFNE|IFDEQ|IFDNE] match-value` command. +// Compare-and-delete with flexible conditions. +// +// Returns the number of keys that were removed (0 or 1). +// +// NOTE DelExArgs is still experimental +// it's signature and behaviour may change +func (c cmdable) DelExArgs(ctx context.Context, key string, a DelExArgs) *IntCmd { + args := []interface{}{"delex", key} + + if a.Mode != "" { + args = append(args, a.Mode) + + // Add match value/digest based on mode + switch a.Mode { + case "ifeq", "IFEQ", "ifne", "IFNE": + if a.MatchValue != nil { + args = append(args, a.MatchValue) + } + case "ifdeq", "IFDEQ", "ifdne", "IFDNE": + if a.MatchDigest != 0 { + args = append(args, fmt.Sprintf("%016x", a.MatchDigest)) + } + } + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Digest returns the xxh3 hash (uint64) of the specified key's value. +// +// The digest is a 64-bit xxh3 hash that can be used for optimistic locking +// with SetIFDEQ, SetIFDNE, and DelExArgs commands. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/digest/ +// +// NOTE Digest is still experimental +// it's signature and behaviour may change +func (c cmdable) Digest(ctx context.Context, key string) *DigestCmd { + cmd := NewDigestCmd(ctx, "digest", key) + _ = c(ctx, cmd) + return cmd +} + // Get Redis `GET key` command. It returns redis.Nil error when key does not exist. 
func (c cmdable) Get(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "get", key) @@ -112,6 +194,35 @@ func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *Fl return cmd } +type SetCondition string + +const ( + // NX only set the keys and their expiration if none exist + NX SetCondition = "NX" + // XX only set the keys and their expiration if all already exist + XX SetCondition = "XX" +) + +type ExpirationMode string + +const ( + // EX sets expiration in seconds + EX ExpirationMode = "EX" + // PX sets expiration in milliseconds + PX ExpirationMode = "PX" + // EXAT sets expiration as Unix timestamp in seconds + EXAT ExpirationMode = "EXAT" + // PXAT sets expiration as Unix timestamp in milliseconds + PXAT ExpirationMode = "PXAT" + // KEEPTTL keeps the existing TTL + KEEPTTL ExpirationMode = "KEEPTTL" +) + +type ExpirationOption struct { + Mode ExpirationMode + Value int64 +} + func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd { cmd := NewLCSCmd(ctx, q) _ = c(ctx, cmd) @@ -157,6 +268,49 @@ func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { return cmd } +type MSetEXArgs struct { + Condition SetCondition + Expiration *ExpirationOption +} + +// MSetEX sets the given keys to their respective values. +// This command is an extension of the MSETNX that adds expiration and XX options. +// Available since Redis 8.4 +// Important: When this method is used with Cluster clients, all keys +// must be in the same hash slot, otherwise CROSSSLOT error will be returned. +// For more information, see https://redis.io/commands/msetex +func (c cmdable) MSetEX(ctx context.Context, args MSetEXArgs, values ...interface{}) *IntCmd { + expandedArgs := appendArgs([]interface{}{}, values) + numkeys := len(expandedArgs) / 2 + + cmdArgs := make([]interface{}, 0, 2+len(expandedArgs)+3) + cmdArgs = append(cmdArgs, "msetex", numkeys) + cmdArgs = append(cmdArgs, expandedArgs...) + + if args.Condition != "" { + cmdArgs = append(cmdArgs, string(args.Condition)) + } + + if args.Expiration != nil { + switch args.Expiration.Mode { + case EX: + cmdArgs = append(cmdArgs, "ex", args.Expiration.Value) + case PX: + cmdArgs = append(cmdArgs, "px", args.Expiration.Value) + case EXAT: + cmdArgs = append(cmdArgs, "exat", args.Expiration.Value) + case PXAT: + cmdArgs = append(cmdArgs, "pxat", args.Expiration.Value) + case KEEPTTL: + cmdArgs = append(cmdArgs, "keepttl") + } + } + + cmd := NewIntCmd(ctx, cmdArgs...) + _ = c(ctx, cmd) + return cmd +} + // Set Redis `SET key value [expiration]` command. // Use expiration for `SETEx`-like behavior. // @@ -185,9 +339,24 @@ func (c cmdable) Set(ctx context.Context, key string, value interface{}, expirat // SetArgs provides arguments for the SetArgs function. type SetArgs struct { - // Mode can be `NX` or `XX` or empty. + // Mode can be `NX`, `XX`, `IFEQ`, `IFNE`, `IFDEQ`, `IFDNE` or empty. Mode string + // MatchValue is used with IFEQ/IFNE modes for compare-and-set operations. + // - IFEQ: only set if current value equals MatchValue + // - IFNE: only set if current value does not equal MatchValue + MatchValue interface{} + + // MatchDigest is used with IFDEQ/IFDNE modes for digest-based compare-and-set. + // - IFDEQ: only set if current value's digest equals MatchDigest + // - IFDNE: only set if current value's digest does not equal MatchDigest + // + // The digest is a uint64 xxh3 hash value. 
+ // + // For examples of client-side digest generation, see: + // example/digest-optimistic-locking/ + MatchDigest uint64 + // Zero `TTL` or `Expiration` means that the key has no expiration time. TTL time.Duration ExpireAt time.Time @@ -223,6 +392,18 @@ func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a S if a.Mode != "" { args = append(args, a.Mode) + + // Add match value/digest for CAS modes + switch a.Mode { + case "ifeq", "IFEQ", "ifne", "IFNE": + if a.MatchValue != nil { + args = append(args, a.MatchValue) + } + case "ifdeq", "IFDEQ", "ifdne", "IFDNE": + if a.MatchDigest != 0 { + args = append(args, fmt.Sprintf("%016x", a.MatchDigest)) + } + } } if a.Get { @@ -290,6 +471,270 @@ func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expir return cmd } +// SetIFEQ Redis `SET key value [expiration] IFEQ match-value` command. +// Compare-and-set: only sets the value if the current value equals matchValue. +// +// Returns "OK" on success. +// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. +// +// NOTE SetIFEQ is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFEQ(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifeq", matchValue) + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFEQGet Redis `SET key value [expiration] IFEQ match-value GET` command. +// Compare-and-set with GET: only sets the value if the current value equals matchValue, +// and returns the previous value. +// +// Returns the previous value on success. +// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. +// +// NOTE SetIFEQGet is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFEQGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifeq", matchValue, "get") + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFNE Redis `SET key value [expiration] IFNE match-value` command. +// Compare-and-set: only sets the value if the current value does not equal matchValue. +// +// Returns "OK" on success. +// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. 
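+//
+// A minimal illustrative sketch, assuming a connected client rdb:
+//
+//	// overwrite "cfg" only when the stored value differs from lastSeen
+//	res, err := rdb.SetIFNE(ctx, "cfg", newCfg, lastSeen, 0).Result()
+//	// res == "OK" when the write happened; err == redis.Nil when the
+//	// current value still equals lastSeen and nothing was written.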
+//
+// NOTE SetIFNE is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFNE(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifne", matchValue)
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetIFNEGet Redis `SET key value [expiration] IFNE match-value GET` command.
+// Compare-and-set with GET: only sets the value if the current value does not equal matchValue,
+// and returns the previous value.
+//
+// Returns the previous value on success.
+// Returns nil if the operation was aborted due to condition not matching.
+// Zero expiration means the key has no expiration time.
+//
+// NOTE SetIFNEGet is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFNEGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifne", matchValue, "get")
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetIFDEQ sets the value only if the current value's digest equals matchDigest.
+//
+// This is a compare-and-set operation using xxh3 digest for optimistic locking.
+// The matchDigest parameter is a uint64 xxh3 hash value.
+//
+// Returns "OK" on success.
+// Returns redis.Nil if the digest doesn't match (value was modified).
+// Zero expiration means the key has no expiration time.
+//
+// For examples of client-side digest generation and usage patterns, see:
+// example/digest-optimistic-locking/
+//
+// Redis 8.4+. See https://redis.io/commands/set/
+//
+// NOTE SetIFDEQ is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFDEQ(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifdeq", fmt.Sprintf("%016x", matchDigest))
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetIFDEQGet sets the value only if the current value's digest equals matchDigest,
+// and returns the previous value.
+//
+// This is a compare-and-set operation using xxh3 digest for optimistic locking.
+// The matchDigest parameter is a uint64 xxh3 hash value.
+//
+// Returns the previous value on success.
+// Returns redis.Nil if the digest doesn't match (value was modified).
+// Zero expiration means the key has no expiration time.
+//
+// For examples of client-side digest generation and usage patterns, see:
+// example/digest-optimistic-locking/
+//
+// Redis 8.4+. See https://redis.io/commands/set/
+//
+// NOTE SetIFDEQGet is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFDEQGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifdeq", fmt.Sprintf("%016x", matchDigest), "get")
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetIFDNE sets the value only if the current value's digest does NOT equal matchDigest.
+//
+// This is a compare-and-set operation using xxh3 digest for optimistic locking.
+// The matchDigest parameter is a uint64 xxh3 hash value.
+//
+// Returns "OK" on success (digest didn't match, value was set).
+// Returns redis.Nil if the digest matches (value was not modified).
+// Zero expiration means the key has no expiration time.
+//
+// For examples of client-side digest generation and usage patterns, see:
+// example/digest-optimistic-locking/
+//
+// Redis 8.4+. See https://redis.io/commands/set/
+//
+// NOTE SetIFDNE is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFDNE(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifdne", fmt.Sprintf("%016x", matchDigest))
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetIFDNEGet sets the value only if the current value's digest does NOT equal matchDigest,
+// and returns the previous value.
+//
+// This is a compare-and-set operation using xxh3 digest for optimistic locking.
+// The matchDigest parameter is a uint64 xxh3 hash value.
+//
+// Returns the previous value on success (digest didn't match, value was set).
+// Returns redis.Nil if the digest matches (value was not modified).
+// Zero expiration means the key has no expiration time.
+//
+// For examples of client-side digest generation and usage patterns, see:
+// example/digest-optimistic-locking/
+//
+// Redis 8.4+. See https://redis.io/commands/set/
+//
+// NOTE SetIFDNEGet is still experimental
+// its signature and behaviour may change
+func (c cmdable) SetIFDNEGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd {
+	args := []interface{}{"set", key, value}
+
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	args = append(args, "ifdne", fmt.Sprintf("%016x", matchDigest), "get")
+
+	cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { cmd := NewIntCmd(ctx, "setrange", key, offset, value) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go index 6f1b2fa45..82d8cdfcf 100644 --- a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go +++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go @@ -40,25 +40,32 @@ type TimeseriesCmdable interface { } type TSOptions struct { - Retention int - ChunkSize int - Encoding string - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + Encoding string + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSIncrDecrOptions struct { - Timestamp int64 - Retention int - ChunkSize int - Uncompressed bool - Labels map[string]string + Timestamp int64 + Retention int + ChunkSize int + Uncompressed bool + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSAlterOptions struct { - Retention int - ChunkSize int - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSCreateRuleOptions struct { @@ -223,6 +230,9 @@ func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interf args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -264,6 +274,9 @@ func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOp args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -292,6 +305,9 @@ func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOption args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -351,12 +367,18 @@ func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) 
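 	// e.g. TSIncrDecrOptions{DuplicatePolicy: "LAST", IgnoreMaxTimeDiff: 5}
 	// yields: TS.INCRBY key value DUPLICATE_POLICY LAST IGNORE 5 0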
_ = c(ctx, cmd) @@ -391,12 +413,18 @@ func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go index 039eaf351..40bc1d661 100644 --- a/vendor/github.com/redis/go-redis/v9/tx.go +++ b/vendor/github.com/redis/go-redis/v9/tx.go @@ -19,16 +19,16 @@ type Tx struct { baseClient cmdable statefulCmdable - hooksMixin } func (c *Client) newTx() *Tx { tx := Tx{ baseClient: baseClient{ - opt: c.opt, - connPool: pool.NewStickyConnPool(c.connPool), + opt: c.opt.clone(), // Clone options to avoid sharing mutable state between transaction and parent client + connPool: pool.NewStickyConnPool(c.connPool), + hooksMixin: c.hooksMixin.clone(), + pushProcessor: c.pushProcessor, // Copy push processor from parent client }, - hooksMixin: c.hooksMixin.clone(), } tx.init() return &tx diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go index 3e482307c..1dc9764dc 100644 --- a/vendor/github.com/redis/go-redis/v9/universal.go +++ b/vendor/github.com/redis/go-redis/v9/universal.go @@ -5,6 +5,9 @@ import ( "crypto/tls" "net" "time" + + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/maintnotifications" ) // UniversalOptions information is required by UniversalClient to establish @@ -26,9 +29,27 @@ type UniversalOptions struct { Dialer func(ctx context.Context, network, addr string) (net.Conn, error) OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string + Protocol int + Username string + Password string + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. + CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + SentinelUsername string SentinelPassword string @@ -41,6 +62,20 @@ type UniversalOptions struct { WriteTimeout time.Duration ContextTimeoutEnabled bool + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. 
+ // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). PoolFIFO bool @@ -61,9 +96,8 @@ type UniversalOptions struct { RouteByLatency bool RouteRandomly bool - // The sentinel master name. - // Only failover clients. - + // MasterName is the sentinel master name. + // Only for failover clients. MasterName string // DisableIndentity - Disable set-lib on connect. @@ -79,6 +113,19 @@ type UniversalOptions struct { DisableIdentity bool IdentitySuffix string + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Only applies to cluster clients. Default is 15 seconds. + FailingTimeoutSeconds int + + UnstableResp3 bool + + // IsClusterMode can be used when only one Addrs is provided (e.g. Elasticache supports setting up cluster mode with configuration endpoint). + IsClusterMode bool + + // MaintNotificationsConfig provides configuration for maintnotifications upgrades. + MaintNotificationsConfig *maintnotifications.Config } // Cluster returns cluster options created from the universal options. @@ -93,9 +140,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { Dialer: o.Dialer, OnConnect: o.OnConnect, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, MaxRedirects: o.MaxRedirects, ReadOnly: o.ReadOnly, @@ -111,6 +161,9 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, + PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, @@ -123,9 +176,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { TLSConfig: o.TLSConfig, - DisableIdentity: o.DisableIdentity, - DisableIndentity: o.DisableIndentity, - IdentitySuffix: o.IdentitySuffix, + DisableIdentity: o.DisableIdentity, + DisableIndentity: o.DisableIndentity, + IdentitySuffix: o.IdentitySuffix, + FailingTimeoutSeconds: o.FailingTimeoutSeconds, + UnstableResp3: o.UnstableResp3, + MaintNotificationsConfig: o.MaintNotificationsConfig, } } @@ -143,13 +199,20 @@ func (o *UniversalOptions) Failover() *FailoverOptions { Dialer: o.Dialer, OnConnect: o.OnConnect, - DB: o.DB, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + DB: o.DB, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, + SentinelUsername: o.SentinelUsername, SentinelPassword: o.SentinelPassword, + RouteByLatency: o.RouteByLatency, + RouteRandomly: o.RouteRandomly, + MaxRetries: o.MaxRetries, MinRetryBackoff: 
o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, @@ -159,6 +222,9 @@ func (o *UniversalOptions) Failover() *FailoverOptions { WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, + PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, PoolTimeout: o.PoolTimeout, @@ -175,6 +241,8 @@ func (o *UniversalOptions) Failover() *FailoverOptions { DisableIdentity: o.DisableIdentity, DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, + // Note: MaintNotificationsConfig not supported for FailoverOptions } } @@ -191,10 +259,13 @@ func (o *UniversalOptions) Simple() *Options { Dialer: o.Dialer, OnConnect: o.OnConnect, - DB: o.DB, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + DB: o.DB, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, @@ -205,6 +276,9 @@ func (o *UniversalOptions) Simple() *Options { WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, + PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, PoolTimeout: o.PoolTimeout, @@ -216,9 +290,11 @@ func (o *UniversalOptions) Simple() *Options { TLSConfig: o.TLSConfig, - DisableIdentity: o.DisableIdentity, - DisableIndentity: o.DisableIndentity, - IdentitySuffix: o.IdentitySuffix, + DisableIdentity: o.DisableIdentity, + DisableIndentity: o.DisableIndentity, + IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, + MaintNotificationsConfig: o.MaintNotificationsConfig, } } @@ -250,14 +326,26 @@ var ( // NewUniversalClient returns a new multi client. The type of the returned client depends // on the following conditions: // -// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. -// 2. if the number of Addrs is two or more, a ClusterClient is returned. -// 3. Otherwise, a single-node Client is returned. +// 1. If the MasterName option is specified with RouteByLatency, RouteRandomly or IsClusterMode, +// a FailoverClusterClient is returned. +// 2. If the MasterName option is specified without RouteByLatency, RouteRandomly or IsClusterMode, +// a sentinel-backed FailoverClient is returned. +// 3. If the number of Addrs is two or more, or IsClusterMode option is specified, +// a ClusterClient is returned. +// 4. Otherwise, a single-node Client is returned. 
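The four selection rules above map directly onto the rewritten constructor that follows. As a rough editorial sketch of how a caller exercises this logic (hypothetical addresses; not part of the patch):

```go
package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()

	// Rule 4: one address, no MasterName -> plain *redis.Client.
	simple := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"}, // hypothetical address
	})
	defer simple.Close()

	// Rule 3: IsClusterMode forces a *redis.ClusterClient even with a
	// single (e.g. configuration-endpoint) address.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:         []string{"cfg-endpoint.example:6379"}, // hypothetical address
		IsClusterMode: true,
	})
	defer cluster.Close()

	_ = simple.Ping(ctx)
	_ = cluster.Ping(ctx)
}
```

`IsClusterMode` is what lets a single configuration endpoint yield a cluster client, matching the Elasticache note on the option above.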
 func NewUniversalClient(opts *UniversalOptions) UniversalClient {
-	if opts.MasterName != "" {
+	if opts == nil {
+		panic("redis: NewUniversalClient nil options")
+	}
+
+	switch {
+	case opts.MasterName != "" && (opts.RouteByLatency || opts.RouteRandomly || opts.IsClusterMode):
+		return NewFailoverClusterClient(opts.Failover())
+	case opts.MasterName != "":
 		return NewFailoverClient(opts.Failover())
-	} else if len(opts.Addrs) > 1 {
+	case len(opts.Addrs) > 1 || opts.IsClusterMode:
 		return NewClusterClient(opts.Cluster())
+	default:
+		return NewClient(opts.Simple())
 	}
-	return NewClient(opts.Simple())
 }
diff --git a/vendor/github.com/redis/go-redis/v9/vectorset_commands.go b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go
new file mode 100644
index 000000000..8f99de073
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go
@@ -0,0 +1,358 @@
+package redis
+
+import (
+	"context"
+	"encoding/json"
+	"strconv"
+)
+
+// note: the APIs are experimental and may be subject to change.
+type VectorSetCmdable interface {
+	VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd
+	VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd
+	VCard(ctx context.Context, key string) *IntCmd
+	VDim(ctx context.Context, key string) *IntCmd
+	VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd
+	VGetAttr(ctx context.Context, key, element string) *StringCmd
+	VInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+	VLinks(ctx context.Context, key, element string) *StringSliceCmd
+	VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd
+	VRandMember(ctx context.Context, key string) *StringCmd
+	VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd
+	VRem(ctx context.Context, key, element string) *BoolCmd
+	VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd
+	VClearAttributes(ctx context.Context, key, element string) *BoolCmd
+	VSim(ctx context.Context, key string, val Vector) *StringSliceCmd
+	VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd
+	VSimWithArgs(ctx context.Context, key string, val Vector, args *VSimArgs) *StringSliceCmd
+	VSimWithArgsWithScores(ctx context.Context, key string, val Vector, args *VSimArgs) *VectorScoreSliceCmd
+	VRange(ctx context.Context, key, start, end string, count int64) *StringSliceCmd
+}
+
+type Vector interface {
+	Value() []any
+}
+
+const (
+	vectorFormatFP32   string = "FP32"
+	vectorFormatValues string = "Values"
+)
+
+type VectorFP32 struct {
+	Val []byte
+}
+
+func (v *VectorFP32) Value() []any {
+	return []any{vectorFormatFP32, v.Val}
+}
+
+var _ Vector = (*VectorFP32)(nil)
+
+type VectorValues struct {
+	Val []float64
+}
+
+func (v *VectorValues) Value() []any {
+	res := make([]any, 2+len(v.Val))
+	res[0] = vectorFormatValues
+	res[1] = len(v.Val)
+	for i, v := range v.Val {
+		res[2+i] = v
+	}
+	return res
+}
+
+var _ Vector = (*VectorValues)(nil)
+
+type VectorRef struct {
+	Name string // the name of the referent vector
+}
+
+func (v *VectorRef) Value() []any {
+	return []any{"ele", v.Name}
+}
+
+var _ Vector = (*VectorRef)(nil)
+
+type VectorScore struct {
+	Name  string
+	Score float64
+}
+
+// `VADD key (FP32 | VALUES num) vector element`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd { + return c.VAddWithArgs(ctx, key, element, val, &VAddArgs{}) +} + +type VAddArgs struct { + // the REDUCE option must be passed immediately after the key + Reduce int64 + Cas bool + + // The NoQuant, Q8 and Bin options are mutually exclusive. + NoQuant bool + Q8 bool + Bin bool + + EF int64 + SetAttr string + M int64 +} + +func (v VAddArgs) reduce() int64 { + return v.Reduce +} + +func (v VAddArgs) appendArgs(args []any) []any { + if v.Cas { + args = append(args, "cas") + } + + if v.NoQuant { + args = append(args, "noquant") + } else if v.Q8 { + args = append(args, "q8") + } else if v.Bin { + args = append(args, "bin") + } + + if v.EF > 0 { + args = append(args, "ef", strconv.FormatInt(v.EF, 10)) + } + if len(v.SetAttr) > 0 { + args = append(args, "setattr", v.SetAttr) + } + if v.M > 0 { + args = append(args, "m", strconv.FormatInt(v.M, 10)) + } + return args +} + +// `VADD key [REDUCE dim] (FP32 | VALUES num) vector element [CAS] [NOQUANT | Q8 | BIN] [EF build-exploration-factor] [SETATTR attributes] [M numlinks]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd { + if addArgs == nil { + addArgs = &VAddArgs{} + } + args := []any{"vadd", key} + if addArgs.reduce() > 0 { + args = append(args, "reduce", addArgs.reduce()) + } + args = append(args, val.Value()...) + args = append(args, element) + args = addArgs.appendArgs(args) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VCARD key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vcard", key) + _ = c(ctx, cmd) + return cmd +} + +// `VDIM key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VDim(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vdim", key) + _ = c(ctx, cmd) + return cmd +} + +// `VEMB key element [RAW]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd { + args := []any{"vemb", key, element} + if raw { + args = append(args, "raw") + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VGETATTR key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VGetAttr(ctx context.Context, key, element string) *StringCmd { + cmd := NewStringCmd(ctx, "vgetattr", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VINFO key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VInfo(ctx context.Context, key string) *MapStringInterfaceCmd { + cmd := NewMapStringInterfaceCmd(ctx, "vinfo", key) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VLinks(ctx context.Context, key, element string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "vlinks", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element WITHSCORES` +// note: the API is experimental and may be subject to change. 
+func (c cmdable) VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd {
+	cmd := NewVectorInfoSliceCmd(ctx, "vlinks", key, element, "withscores")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VRANDMEMBER key`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VRandMember(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "vrandmember", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VRANDMEMBER key [count]`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "vrandmember", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VREM key element`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VRem(ctx context.Context, key, element string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "vrem", key, element)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VSETATTR key element "{ JSON obj }"`
+// The `attr` must be something that can be marshaled to JSON (using encoding/json) unless
+// the argument is a string or []byte, in which case we assume it can be passed directly as JSON.
+//
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd {
+	var attrStr string
+	var err error
+	switch v := attr.(type) {
+	case string:
+		attrStr = v
+	case []byte:
+		attrStr = string(v)
+	default:
+		var bytes []byte
+		bytes, err = json.Marshal(v)
+		if err != nil {
+			// If marshalling fails, create the command and set the error; this command won't be executed.
+			cmd := NewBoolCmd(ctx, "vsetattr", key, element, "")
+			cmd.SetErr(err)
+			return cmd
+		}
+		attrStr = string(bytes)
+	}
+	cmd := NewBoolCmd(ctx, "vsetattr", key, element, attrStr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VClearAttributes` clears attributes on a vector set element.
+// It is implemented by executing the command `VSETATTR key element ""`.
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VClearAttributes(ctx context.Context, key, element string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "vsetattr", key, element, "")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element)`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSim(ctx context.Context, key string, val Vector) *StringSliceCmd {
+	return c.VSimWithArgs(ctx, key, val, &VSimArgs{})
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element) WITHSCORES`
+// note: the API is experimental and may be subject to change.
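Taken together, the commands above support a small end-to-end workflow. A hedged sketch follows (hypothetical key, elements, and values; `VSimWithScores` is defined immediately below this aside, and `Result()` on its `VectorScoreSliceCmd` yielding `[]VectorScore` is an assumption about that command type, which is defined elsewhere in the package):

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address

	// Store two 3-dimensional vectors under a hypothetical key.
	_ = rdb.VAdd(ctx, "vs:demo", "a", &redis.VectorValues{Val: []float64{0.1, 0.2, 0.3}}).Err()
	_ = rdb.VAdd(ctx, "vs:demo", "b", &redis.VectorValues{Val: []float64{0.9, 0.8, 0.7}}).Err()

	// Similarity search seeded by an existing element (the ELE form, via VectorRef).
	scores, err := rdb.VSimWithScores(ctx, "vs:demo", &redis.VectorRef{Name: "a"}).Result()
	if err != nil {
		panic(err)
	}
	for _, s := range scores {
		fmt.Println(s.Name, s.Score) // neighbors of "a" with their similarity scores
	}
}
```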
+func (c cmdable) VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd { + return c.VSimWithArgsWithScores(ctx, key, val, &VSimArgs{}) +} + +type VSimArgs struct { + Count int64 + EF int64 + Filter string + FilterEF int64 + Truth bool + NoThread bool + Epsilon float64 +} + +func (v VSimArgs) appendArgs(args []any) []any { + if v.Count > 0 { + args = append(args, "count", v.Count) + } + if v.EF > 0 { + args = append(args, "ef", v.EF) + } + if len(v.Filter) > 0 { + args = append(args, "filter", v.Filter) + } + if v.FilterEF > 0 { + args = append(args, "filter-ef", v.FilterEF) + } + if v.Truth { + args = append(args, "truth") + } + if v.NoThread { + args = append(args, "nothread") + } + if v.Epsilon > 0 { + args = append(args, "Epsilon", v.Epsilon) + } + return args +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [COUNT num] [EPSILON delta] +// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSimWithArgs(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *StringSliceCmd { + if simArgs == nil { + simArgs = &VSimArgs{} + } + args := []any{"vsim", key} + args = append(args, val.Value()...) + args = simArgs.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [WITHSCORES] [COUNT num] [EPSILON delta] +// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSimWithArgsWithScores(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *VectorScoreSliceCmd { + if simArgs == nil { + simArgs = &VSimArgs{} + } + args := []any{"vsim", key} + args = append(args, val.Value()...) + args = append(args, "withscores") + args = simArgs.appendArgs(args) + cmd := NewVectorInfoSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VRANGE key start end count` +// a negative count means to return all the elements in the vector set. +// note: the API is experimental and may be subject to change. +func (c cmdable) VRange(ctx context.Context, key, start, end string, count int64) *StringSliceCmd { + args := []any{"vrange", key, start, end, count} + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go index 2cb9dd435..126fa10b0 100644 --- a/vendor/github.com/redis/go-redis/v9/version.go +++ b/vendor/github.com/redis/go-redis/v9/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. func Version() string { - return "9.6.3" + return "9.17.2" } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e84..2950fdb42 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
-func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d66..5bb3b16c7 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b733..67f80b6aa 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. 
+ // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. 
type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776ea..a2802764f 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0f..44197b808 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b063..022768bb5 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. 
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a9..815d271ff 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. + min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. 
} func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde3..e09acf022 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 6bf3abc41..a6d0cbcc9 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -7,3 +7,5 @@ ans nam valu thirdparty +addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 5f69cc027..1b1b2aff9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -10,6 +10,7 @@ linters: - depguard - errcheck - errorlint + - gocritic - godot - gosec - govet @@ -86,6 +87,18 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true godot: exclude: # Exclude links. 
@@ -167,7 +180,10 @@ linters: - fmt.Print - fmt.Printf - fmt.Println + - name: unused-parameter + - name: unused-receiver - name: unnecessary-stmt + - name: use-any - name: useless-break - name: var-declaration - name: var-naming @@ -181,6 +197,9 @@ linters: - float-compare - go-require - require-error + usetesting: + context-background: true + context-todo: true exclusions: generated: lax presets: @@ -224,10 +243,6 @@ linters: - linters: - gosec text: 'G402: TLS MinVersion too low.' - paths: - - third_party$ - - builtin$ - - examples$ issues: max-issues-per-linter: 0 max-same-issues: 0 @@ -237,14 +252,12 @@ formatters: - goimports - golines settings: + gofumpt: + extra-rules: true goimports: local-prefixes: - - go.opentelemetry.io + - go.opentelemetry.io/otel golines: max-len: 120 exclusions: generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 40d62fa2e..994b677df 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,6 +1,13 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. +https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4acc75701..ecbe0582c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,161 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#7459) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) +- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) +- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) +- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) +- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. + All `Processor` implementations now include an `Enabled` method. (#7639) +- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. + The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648) + +### Changed + +- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. + Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) +- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302) +- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) +- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. + ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. + Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. + To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) +- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) +- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) +- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. 
(#7427) +- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438) +- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. + If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) + +### Fixed + +- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. + Attributes with duplicate keys will use the last value passed. (#7300) +- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) + +### Removed + +- Drop support for [Go 1.23]. (#7274) +- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. + The `Enabled` method has been added to the `Processor` interface instead. + All `Processor` implementations must now implement the `Enabled` method. + Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) + +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. 
(#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001) +- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. + Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) +- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. + The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) +- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) +- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. + Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) +- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. + Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) +- Support testing of [Go 1.25]. (#7187) +- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. + The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) + +### Changed + +- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) +- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) +- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. 
(#7094) + +### Fixed + +- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) +- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) +- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) + +### Deprecated + +- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) +- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) + +## [0.59.1] 2025-07-21 + +### Changed + +- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) +- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. + It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + ## [1.37.0/0.59.0/0.13.0] 2025-06-25 ### Added @@ -3343,7 +3498,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 @@ -3439,6 +3597,7 @@ It contains api and sdk for trace and meter. +[Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2b..26a03aed1 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index f9ddc281f..ff5e1f76e 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) 
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. +Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. Enter the newly created directory and add your fork as a new remote: @@ -109,7 +108,7 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * At least one of the qualified approvals need to be from an + * At least one of the qualified approvals needs to be from an [Approver]/[Maintainer] affiliated with a different company than the author of the PR. * PRs introducing changes that have already been discussed and consensus @@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -192,6 +191,35 @@ should have `go test -bench` output in their description. should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) output in their description. +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). + +Only the dependencies explicitly included in the released modules have been +tested and verified to work with the released code. No other guarantee is made +about the compatibility of other dependencies. 
+ ## Documentation Each (non-internal, non-test) package must be documented using @@ -233,6 +261,10 @@ For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. +We also recommend following the +[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +that collects common comments made during reviews of Go code. + As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be @@ -586,6 +618,10 @@ See also: ### Testing +We allow using [`testify`](https://github.com/stretchr/testify) even though +it is seen as non-idiomatic according to +the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. + The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the @@ -598,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. A sub-module should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child -and if the internal package API has changed it will fail to upgrade[^3]. +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, it will fail to upgrade[^3]. There are two known exceptions to this rule: @@ -620,7 +656,7 @@ this. ### Ignoring context cancellation -OpenTelemetry API implementations need to ignore the cancellation of the context that are +OpenTelemetry API implementations need to ignore the cancellation of the context that is passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). Recording methods should not return an error describing the cancellation state of the context when they complete, nor should they abort any work. @@ -638,14 +674,442 @@ force flushing telemetry, shutting down a signal provider) the context cancellat should be honored. This means all work done on behalf of the user provided context should be canceled. -## Approvers and Maintainers +### Observability -### Triagers +OpenTelemetry Go SDK components should be instrumented to enable users observability for the health and performance of the telemetry pipeline itself. +This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. -- [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent +This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. -### Approvers +#### Environment Variable Activation + +Observability features are currently experimental. +They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. +This follows the established experimental feature pattern used throughout the SDK. 
+ +Components should check for this environment variable using a consistent pattern: + +```go +import "go.opentelemetry.io/otel/*/internal/x" + +if x.Observability.Enabled() { + // Initialize observability metrics +} +``` + +**References**: + +- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) +- [sdk](./sdk/internal/x/x.go) + +#### Encapsulation + +Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). +It should not be mixed into the instrumented component. + +Prefer this: + +```go +type SDKComponent struct { + inst *instrumentation +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +To this: + +```go +// ❌ Avoid this pattern. +type SDKComponent struct { + /* other SDKComponent fields... */ + + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +The instrumentation code should not bloat the code being instrumented. +Likely, this means its own file, or its own package if it is complex or reused. + +#### Initialization + +Instrumentation setup should be explicit, side-effect free, and local to the relevant component. +Avoid relying on global or implicit [side effects][side-effect] for initialization. + +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + // Export spans... 
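+	// ❌ expensiveOperation already ran above, even when observability is
+	// disabled (e.inst == nil) and the computed attrs are thrown away.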
+}
+
+func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
+	if i == nil || i.inflight == nil {
+		return
+	}
+	i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
+}
+```
+
+When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
+
+##### Attribute and Option Allocation Management
+
+Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
+
+```go
+var (
+	attrPool = sync.Pool{
+		New: func() any {
+			// Pre-allocate common capacity
+			knownCap := 8 // Adjust based on expected usage
+			s := make([]attribute.KeyValue, 0, knownCap)
+			// Return a pointer to avoid extra allocation on Put().
+			return &s
+		},
+	}
+
+	addOptPool = &sync.Pool{
+		New: func() any {
+			const n = 1 // WithAttributeSet
+			o := make([]metric.AddOption, 0, n)
+			// Return a pointer to avoid extra allocation on Put().
+			return &o
+		},
+	}
+)
+
+func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
+	attrs := attrPool.Get().(*[]attribute.KeyValue)
+	defer func() {
+		*attrs = (*attrs)[:0] // Reset.
+		attrPool.Put(attrs)
+	}()
+
+	*attrs = append(*attrs, baseAttrs...)
+	// Add any dynamic attributes.
+	*attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
+
+	addOpt := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*addOpt = (*addOpt)[:0]
+		addOptPool.Put(addOpt)
+	}()
+
+	set := attribute.NewSet(*attrs...)
+	*addOpt = append(*addOpt, metric.WithAttributeSet(set))
+
+	i.counter.Add(ctx, value, *addOpt...)
+}
+```
+
+Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
+This amortizes the cost of allocation and synchronization.
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
+
+[`sync.Pool`]: https://pkg.go.dev/sync#Pool
+
+##### Cache common attribute sets for repeated measurements
+
+If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
+
+```go
+type spanLiveSetKey struct {
+	sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+	{true}: attribute.NewSet(
+		otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+			otelconv.SpanSamplingResultRecordAndSample,
+		),
+	),
+	{false}: attribute.NewSet(
+		otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+			otelconv.SpanSamplingResultRecordOnly,
+		),
+	),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+	key := spanLiveSetKey{sampled: sampled}
+	return spanLiveSetCache[key]
+}
+```
+
+##### Benchmarking
+
+Always provide benchmarks when introducing or refactoring instrumentation.
+Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
+
+```go
+func BenchmarkExportSpans(b *testing.B) {
+	scenarios := []struct {
+		name       string
+		obsEnabled bool
+	}{
+		{"ObsDisabled", false},
+		{"ObsEnabled", true},
+	}
+
+	for _, scenario := range scenarios {
+		b.Run(scenario.name, func(b *testing.B) {
+			b.Setenv(
+				"OTEL_GO_X_OBSERVABILITY",
+				strconv.FormatBool(scenario.obsEnabled),
+			)
+
+			exporter := NewExporter()
+			spans := generateTestSpans(100)
+
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			for i := 0; i < b.N; i++ {
+				_ = exporter.ExportSpans(context.Background(), spans)
+			}
+		})
+	}
+}
+```
+
+#### Error Handling and Robustness
+
+Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible.
+
+```go
+func newInstrumentation() (*instrumentation, error) {
+	if !x.Observability.Enabled() {
+		return nil, nil
+	}
+
+	m := otel.GetMeterProvider().Meter(/* initialize meter */)
+	counter, err := otelconv.NewSDKComponentCounter(m)
+	// Use the partially initialized counter if available.
+	i := &instrumentation{counter: counter}
+	// Return any error to the caller.
+	return i, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func newInstrumentation() *instrumentation {
+	if !x.Observability.Enabled() {
+		return nil
+	}
+
+	m := otel.GetMeterProvider().Meter(/* initialize meter */)
+	counter, err := otelconv.NewSDKComponentCounter(m)
+	if err != nil {
+		// ❌ Do not dump the error to the OTel Handler. Return it to the
+		// caller.
+		otel.Handle(err)
+		// ❌ Do not return nil if we can still use the partially initialized
+		// counter.
+		return nil
+	}
+	return &instrumentation{counter: counter}
+}
+```
+
+If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
+
+#### Context Propagation
+
+Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+	// Use the provided context for observability measurements
+	e.inst.recordSpanExportStarted(ctx, len(spans))
+
+	err := e.doExport(ctx, spans)
+
+	if err != nil {
+		e.inst.recordSpanExportFailed(ctx, len(spans), err)
+	} else {
+		e.inst.recordSpanExportSucceeded(ctx, len(spans))
+	}
+
+	return err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+	// ❌ Do not break the context propagation.
+	e.inst.recordSpanExportStarted(context.Background(), len(spans))
+
+	err := e.doExport(ctx, spans)
+
+	/* ... */
+
+	return err
+}
+```
+
+#### Semantic Conventions Compliance
+
+All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
+
+Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go).
+
+##### Component Identification
+
+Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
+
+If a component is not a well-known type specified in the semantic conventions, use the fully qualified type (package path plus type name) as a stable identifier.
+ +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. + +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. + t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. component ID counters) is reset between tests. + +## Approvers and Maintainers ### Maintainers @@ -655,16 +1119,33 @@ should be canceled. - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). + +### Approvers + +- [Flc](https://github.com/flc1125), Independent + +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). + +### Triagers + +- [Alex Kats](https://github.com/akats7), Capital One + +For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). 
+ ### Emeritus - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 4fa423ca0..44870248c 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink @@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -149,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 @@ -284,7 +282,7 @@ semconv-generate: $(SEMCONVKIT) docker run --rm \ -u $(DOCKER_USER) \ --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ $(WEAVER_IMAGE) registry generate \ diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5fa1b75c6..c63359543 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -53,20 +53,20 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | +| macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | +| Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | +| Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. 
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 1ddcdef03..861756fd7 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
 
 ## Breaking changes validation
 
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
 
 You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
 
@@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
    ```
 
 3. Update the [Changelog](./CHANGELOG.md).
-   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+   - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
     To verify this, you can look directly at the commits since the `<last tag>`.
 
    ```
@@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct.
 ...
 
-## Release
+## Sign artifacts
 
-Finally create a Release for the new `<new tag>` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
+To ensure we comply with CNCF best practices, we need to sign the release artifacts.
 
-### Sign the Release Artifact
+Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
+Both archives need to be signed with your GPG key.
 
-To ensure we comply with CNCF best practices, we need to sign the release artifact.
-The tarball attached to the GitHub release needs to be signed with your GPG key.
+You can use [this script] to verify the contents of the archives before signing them.
 
-Follow [these steps] to sign the release artifact and upload it to GitHub.
-You can use [this script] to verify the contents of the tarball before signing it.
+To find your GPG key ID, run:
 
-Be sure to use the correct GPG key when signing the release artifact.
+```terminal
+gpg --list-secret-keys --keyid-format=long
+```
+
+The key ID is the 16-character string after `sec rsa4096/` (or similar).
+
+Set environment variables and sign both artifacts:
 
 ```terminal
-gpg --local-user <key ID> --armor --detach-sign opentelemetry-go-<new tag>.tar.gz
+export VERSION="<new tag>" # e.g., v1.32.0
+export KEY_ID="<your key ID>"
+
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
 ```
 
-You can verify the signature with:
+You can verify the signatures with:
 
 ```terminal
-gpg --verify opentelemetry-go-<new tag>.tar.gz.asc opentelemetry-go-<new tag>.tar.gz
+gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
+gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
 ```
 
-[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
 [this script]: https://github.com/MrAlias/attest-sh
 
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
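+
+One way to do this is with the GitHub CLI, which can upload the signed
+artifacts at creation time. This is only an illustration, not a required
+tool, and the `notes.md` file holding the Changelog excerpt is hypothetical:
+
+```terminal
+gh release create $VERSION \
+  opentelemetry-go-$VERSION.tar.gz \
+  opentelemetry-go-$VERSION.tar.gz.asc \
+  opentelemetry-go-$VERSION.zip \
+  opentelemetry-go-$VERSION.zip.asc \
+  --title "$VERSION" --notes-file notes.md
+```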
+ +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. + ## Post-Release ### Contrib Repository @@ -160,14 +176,6 @@ This helps track what changes were included in each release. Once all related issues and PRs have been added to the milestone, close the milestone. -### Demo Repository - -Bump the dependencies in the following Go services: - -- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) - ### Close the `Version Release` issue Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml new file mode 100644 index 000000000..8041fc62e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. 
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. + - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. 
See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. + - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. + +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - 
https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. + env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index b8cb605c1..b27c9e84f 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -83,7 +83,7 @@ is designed so the following goals can be achieved. in either the module path or the import path. * In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. + alerts and dashboards. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. * Experimental modules still under active development will be versioned at diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 318e42fca..6cc1a1655 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -16,7 +16,7 @@ type ( // set into a wire representation. Encoder interface { // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. + // its Iterator. This result may be cached by an attribute.Set. 
Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 3eeaa5d44..624ebbe38 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -15,8 +15,8 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } allowed := make(map[Key]struct{}, len(keys)) @@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } forbid := make(map[Key]struct{}, len(keys)) diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go new file mode 100644 index 000000000..6aa69aeae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" +) + +// Type identifiers. These identifiers are hashed before the value of the +// corresponding type. This is done to distinguish values that are hashed with +// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and +// int64(0)). +// +// These are all 8 byte length strings converted to a uint64 representation. A +// uint64 is used instead of the string directly as an optimization, it avoids +// the for loop in [xxhash] which adds minor overhead. 
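+//
+// For example, boolID is the little-endian interpretation of the bytes of
+// "_boolean": binary.LittleEndian.Uint64([]byte("_boolean")) == boolID.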
+const ( + boolID uint64 = 7953749933313450591 // "_boolean" (little endian) + int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) + float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) + stringID uint64 = 6874584755375207263 // "_string_" (little endian) + boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) + int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) + float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) + stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) +) + +// hashKVs returns a new xxHash64 hash of kvs. +func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. + h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. + v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index b76d2bbfd..087550430 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -12,7 +12,7 @@ import ( ) // BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. 
-func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 000000000..113a97838 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. +func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. 
+} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. +func (h Hash) Sum64() uint64 { + return h.d.Sum64() +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index f2ba89ce4..8df6249f0 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index d9a22c650..80a9e5643 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. +// Defined reports whether the key is not empty. func (k Key) Defined() bool { return len(k) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 3028f9a40..8c6928ca7 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 6cbefcead..911d557ee 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -9,6 +9,8 @@ import ( "reflect" "slices" "sort" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" ) type ( @@ -23,19 +25,19 @@ type ( // the Equals method to ensure stable equivalence checking. // // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. + // instead of a Set directly. Set has relatively poor performance when used + // as a map key compared to Distinct. Set struct { - equivalent Distinct + hash uint64 + data any } - // Distinct is a unique identifier of a Set. + // Distinct is an identifier of a Set which is very likely to be unique. // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. 
+	// Distinct should be used as a map key instead of a Set to provide better
+	// performance for map operations.
 	Distinct struct {
-		iface interface{}
+		hash uint64
 	}
 
 	// Sortable implements sort.Interface, used for sorting KeyValue.
 	Sortable []KeyValue
 )
 
+// Compile time check these types remain comparable.
+var (
+	_ = isComparable(Set{})
+	_ = isComparable(Distinct{})
+)
+
+func isComparable[T comparable](t T) T { return t }
+
 var (
 	// keyValueType is used in computeDistinctReflect.
 	keyValueType = reflect.TypeOf(KeyValue{})
 
-	// emptySet is returned for empty attribute sets.
-	emptySet = &Set{
-		equivalent: Distinct{
-			iface: [0]KeyValue{},
-		},
+	// emptyHash is the hash of an empty set.
+	emptyHash = xxhash.New().Sum64()
+
+	// userDefinedEmptySet is an empty set. It was mistakenly exposed to users
+	// as something they can assign to, so it must remain addressable and
+	// mutable.
+	//
+	// This is kept for backwards compatibility, but should not be used in new code.
+	userDefinedEmptySet = &Set{
+		hash: emptyHash,
+		data: [0]KeyValue{},
+	}
+
+	emptySet = Set{
+		hash: emptyHash,
+		data: [0]KeyValue{},
 	}
 )
 
@@ -62,33 +83,35 @@ var (
 //
 // This is a convenience provided for optimized calling utility.
 func EmptySet() *Set {
-	return emptySet
+	// Continue to return the pointer to the user-defined empty set for
+	// backwards-compatibility.
+	//
+	// New code should not use this, instead use emptySet.
+	return userDefinedEmptySet
 }
 
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
-	return reflect.ValueOf(d.iface)
-}
+// Valid reports whether this value refers to a valid Set.
+func (d Distinct) Valid() bool { return d.hash != 0 }
 
-// Valid returns true if this value refers to a valid Set.
-func (d Distinct) Valid() bool {
-	return d.iface != nil
+// reflectValue abbreviates reflect.ValueOf(d).
+func (l Set) reflectValue() reflect.Value {
+	return reflect.ValueOf(l.data)
 }
 
 // Len returns the number of attributes in this set.
 func (l *Set) Len() int {
-	if l == nil || !l.equivalent.Valid() {
+	if l == nil || l.hash == 0 {
 		return 0
 	}
-	return l.equivalent.reflectValue().Len()
+	return l.reflectValue().Len()
 }
 
 // Get returns the KeyValue at ordered position idx in this set.
 func (l *Set) Get(idx int) (KeyValue, bool) {
-	if l == nil || !l.equivalent.Valid() {
+	if l == nil || l.hash == 0 {
 		return KeyValue{}, false
 	}
-	value := l.equivalent.reflectValue()
+	value := l.reflectValue()
 
 	if idx >= 0 && idx < value.Len() {
 		// Note: The Go compiler successfully avoids an allocation for
@@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
 
 // Value returns the value of a specified key in this set.
 func (l *Set) Value(k Key) (Value, bool) {
-	if l == nil || !l.equivalent.Valid() {
+	if l == nil || l.hash == 0 {
 		return Value{}, false
 	}
-	rValue := l.equivalent.reflectValue()
+	rValue := l.reflectValue()
 	vlen := rValue.Len()
 
 	idx := sort.Search(vlen, func(idx int) bool {
@@ -120,7 +143,7 @@
 	return Value{}, false
 }
 
-// HasValue tests whether a key is defined in this set.
+// HasValue reports whether a key is defined in this set.
 func (l *Set) HasValue(k Key) bool {
 	if l == nil {
 		return false
@@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
 	return iter.ToSlice()
 }
 
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent.
Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +// computeDataFixed computes a Set data for small slices. 
It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { +// computeDataReflect computes a Set data using reflection, works for any size +// input. +func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,11 +408,11 @@ func computeDistinctReflect(kvs []KeyValue) interface{} { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. -func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b2477..24f1fa37d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 817eecacf..653c33a86 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -22,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. +func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -262,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 0e1fe2422..78e98c4c0 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". 
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 0e1fe2422..78e98c4c0 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 	// If we couldn't find any valid key character,
 	// it means the key is either empty or invalid.
 	if keyStart == keyEnd {
-		return
+		return p, ok
 	}
 
 	// Skip spaces after the key: " key< >= value ".
@@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 		// A key can have no value, like: " key ".
 		ok = true
 		p.key = s[keyStart:keyEnd]
-		return
+		return p, ok
 	}
 
 	// If we have not reached the end and we can't find the '=' delimiter,
 	// it means the property is invalid.
 	if s[index] != keyValueDelimiter[0] {
-		return
+		return p, ok
 	}
 
 	// Attempting to parse the value.
@@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 	// we have not reached the end, it means the property is
 	// invalid, something like: " key = value value1".
 	if index != len(s) {
-		return
+		return p, ok
 	}
 
 	// Decode a percent-encoded value.
 	rawVal := s[valueStart:valueEnd]
 	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
-		return
+		return p, ok
 	}
 
 	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 	p.hasValue = true
 	p.value = value
-	return
+	return p, ok
 }
 
 func skipSpace(s string, offset int) int {
@@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
 // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
 // Baggage name is a valid, non-empty UTF-8 string.
 func validateBaggageName(s string) bool {
-	if len(s) == 0 {
+	if s == "" {
 		return false
 	}
 
@@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool {
 // validateKey checks if the string is a valid W3C Baggage key.
 func validateKey(s string) bool {
-	if len(s) == 0 {
+	if s == "" {
 		return false
 	}
 
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 49a35b122..d48847ed8 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 		return errors.New("nil receiver passed to UnmarshalJSON")
 	}
 
-	var x interface{}
+	var x any
 	if err := json.Unmarshal(b, &x); err != nil {
 		return err
 	}
@@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
 	if !ok {
 		return nil, fmt.Errorf("invalid code: %d", *c)
 	}
-	return []byte(fmt.Sprintf("%q", str)), nil
+	return fmt.Appendf(nil, "%q", str), nil
 }
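Note: the baggage.go hunks above are a pure style change (bare `return` replaced by an explicit `return p, ok` for the named results), while the codes.go hunk removes an allocation: `fmt.Appendf` formats straight into a byte slice instead of going through an intermediate string. A small standalone sketch of the equivalence, assuming nothing beyond the standard library:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	str := "Unset"
	viaSprintf := []byte(fmt.Sprintf("%q", str)) // old: build a string, then copy it into a []byte
	viaAppendf := fmt.Appendf(nil, "%q", str)    // new: format directly into the slice

	fmt.Println(bytes.Equal(viaSprintf, viaAppendf)) // true
}
```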
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index 935bd4876..cadb87cc0 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
 # This is a renovate-friendly source of Docker images.
-FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
-FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
+FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
+FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver
 FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index adbca7d34..86d7f4ba0 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
 
 // Info prints messages about the general state of the API or SDK.
 // This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
+func Info(msg string, keysAndValues ...any) {
 	GetLogger().V(4).Info(msg, keysAndValues...)
 }
 
 // Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
+func Error(err error, msg string, keysAndValues ...any) {
 	GetLogger().Error(err, msg, keysAndValues...)
 }
 
 // Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
+func Debug(msg string, keysAndValues ...any) {
 	GetLogger().V(8).Info(msg, keysAndValues...)
 }
 
 // Warn prints messages about warnings in the API or SDK.
 // Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
+func Warn(msg string, keysAndValues ...any) {
 	GetLogger().V(1).Info(msg, keysAndValues...)
 }
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index adb37b5b0..6db969f73 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -105,7 +105,7 @@ type delegatedInstrument interface {
 	setDelegate(metric.Meter)
 }
 
-// instID are the identifying properties of a instrument.
+// instID are the identifying properties of an instrument.
 type instID struct {
 	// name is the name of the stream.
 	name string
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 49e4ac4fa..bf5cf3119 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -26,6 +26,7 @@ import (
 	"sync/atomic"
 
 	"go.opentelemetry.io/auto/sdk"
+	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/trace"
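Note: the internal/global logging helpers above keep their logr verbosity mapping (Warn at V(1), Info at V(4), Debug at V(8)); only the parameter type changed to the `any` alias. One way a consumer can surface these messages, assuming the github.com/go-logr/stdr adapter (an assumption, not part of this diff):

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"

	"go.opentelemetry.io/otel"
)

func main() {
	// Verbosity 4 lets Warn (V(1)) and Info (V(4)) through; raise it to 8
	// to also see Debug. Error is logged regardless of verbosity.
	stdr.SetVerbosity(4)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}
```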
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
index 1e6473b32..527d9aec8 100644
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -11,7 +11,7 @@ import (
 // Meter returns a Meter from the global MeterProvider. The name must be the
 // name of the library providing instrumentation. This name may be the same as
 // the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
+// If the name is empty, then an implementation defined default name will be
 // used instead.
 //
 // If this is called before a global MeterProvider is registered the returned
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index d9e3b13e4..e42dd6e70 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -3,7 +3,11 @@
 
 package metric // import "go.opentelemetry.io/otel/metric"
 
-import "go.opentelemetry.io/otel/attribute"
+import (
+	"slices"
+
+	"go.opentelemetry.io/otel/attribute"
+)
 
 // MeterConfig contains options for Meters.
 type MeterConfig struct {
@@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
 	})
 }
 
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// WithInstrumentationAttributes adds the instrumentation attributes.
+//
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
 //
-// The passed attributes will be de-duplicated.
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
 func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+	set := attribute.NewSet(slices.Clone(attr)...)
+	return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
+	if set.Len() == 0 {
+		return meterOptionFunc(func(config MeterConfig) MeterConfig {
+			return config
+		})
+	}
+
 	return meterOptionFunc(func(config MeterConfig) MeterConfig {
-		config.attrs = attribute.NewSet(attr...)
+		if config.attrs.Len() == 0 {
+			config.attrs = set
+		} else {
+			config.attrs = mergeSets(config.attrs, set)
+		}
 		return config
 	})
 }
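Note: this is the main behavioral change in the metric API in this bump: instrumentation attribute options now merge across repeated options (last value wins per key) instead of a later option overwriting the whole set, and the new WithInstrumentationAttributeSet skips the defensive clone that WithInstrumentationAttributes performs. A sketch of how a caller would observe the merge once this version is vendored (hypothetical usage, not from this diff):

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	set := attribute.NewSet(attribute.String("tenant", "a"), attribute.String("region", "eu"))

	// Previously the second option would replace the first set entirely;
	// now both merge, and "tenant" keeps the last value passed ("b").
	meter := otel.Meter("example.com/mylib",
		metric.WithInstrumentationAttributeSet(set),
		metric.WithInstrumentationAttributes(attribute.String("tenant", "b")),
	)
	_ = meter
}
```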
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index ebda5026d..051882602 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -20,7 +20,7 @@ type Baggage struct{}
 var _ TextMapPropagator = Baggage{}
 
 // Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
 	bStr := baggage.FromContext(ctx).String()
 	if bStr != "" {
 		carrier.Set(baggageHeader, bStr)
@@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
 // Extract returns a copy of parent with the baggage from the carrier added.
 // If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
 // for multiple values extraction. Otherwise, Get is called.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
 	if multiCarrier, ok := carrier.(ValuesGetter); ok {
 		return extractMultiBaggage(parent, multiCarrier)
 	}
@@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
 }
 
 // Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
+func (Baggage) Fields() []string {
 	return []string{baggageHeader}
 }
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index 5c8c26ea2..0a32c59aa 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -20,7 +20,7 @@ type TextMapCarrier interface {
 	// must never be done outside of a new major release.
 
 	// Set stores the key-value pair.
-	Set(key string, value string)
+	Set(key, value string)
 
 	// DO NOT CHANGE: any modification will not be backwards compatible and
 	// must never be done outside of a new major release.
@@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string {
 }
 
 // Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
+func (hc HeaderCarrier) Set(key, value string) {
 	http.Header(hc).Set(key, value)
 }
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6870e316d..271ab71f1 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -36,7 +36,7 @@ var (
 )
 
 // Inject injects the trace context from ctx into carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
 	sc := trace.SpanContextFromContext(ctx)
 	if !sc.IsValid() {
 		return
@@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont
 	return trace.ContextWithRemoteSpanContext(ctx, sc)
 }
 
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
 	h := carrier.Get(traceparentHeader)
 	if h == "" {
 		return trace.SpanContext{}
@@ -111,7 +111,7 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
 	}
 	// Clear all flags other than the trace-context supported sampling bit.
-	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked.
 
 	// Ignore the error returned here. Failure to parse tracestate MUST NOT
 	// affect the parsing of traceparent according to the W3C tracecontext
@@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool {
 }
 
 // Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
+func (TraceContext) Fields() []string {
 	return []string{traceparentHeader, tracestateHeader}
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c6..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
-	// AndroidOSAPILevelKey is the attribute Key conforming to the
-	// "android.os.api_level" semantic conventions. It represents the uniquely
-	// identifies the framework API revision offered by a version
-	// (`os.version`) of the android operating system. More information can be
-	// found
-	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. 
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. 
It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. 
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
- // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. 
- // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
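Taken together, the browser.* keys map UA client-hint data onto typed attributes; BrowserBrands is variadic because the underlying attribute is a string slice, while BrowserMobile is a plain boolean. A small sketch with values mirroring the examples above, again assuming the v1.26.0 semconv path:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	attrs := []attribute.KeyValue{
		// Variadic helper backed by a string[] attribute.
		semconv.BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
		semconv.BrowserLanguage("en-US"),
		semconv.BrowserMobile(false),
		semconv.BrowserPlatform("macOS"),
	}
	for _, kv := range attrs {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}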
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
-	// Alibaba Cloud Elastic Compute Service
-	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
-	// Alibaba Cloud Function Compute
-	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
-	// Red Hat OpenShift on Alibaba Cloud
-	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
-	// AWS Elastic Compute Cloud
-	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
-	// AWS Elastic Container Service
-	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
-	// AWS Elastic Kubernetes Service
-	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
-	// AWS Lambda
-	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
-	// AWS Elastic Beanstalk
-	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
-	// AWS App Runner
-	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
-	// Red Hat OpenShift on AWS (ROSA)
-	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
-	// Azure Virtual Machines
-	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
-	// Azure Container Apps
-	CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
-	// Azure Container Instances
-	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
-	// Azure Kubernetes Service
-	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
-	// Azure Functions
-	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
-	// Azure App Service
-	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
-	// Azure Red Hat OpenShift
-	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
-	// Google Bare Metal Solution (BMS)
-	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
-	// Google Cloud Compute Engine (GCE)
-	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
-	// Google Cloud Run
-	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
-	// Google Cloud Kubernetes Engine (GKE)
-	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
-	// Google Cloud Functions (GCF)
-	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
-	// Google Cloud App Engine (GAE)
-	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
-	// Red Hat OpenShift on Google Cloud
-	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
-	// Red Hat OpenShift on IBM Cloud
-	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
-	// Tencent Cloud Cloud Virtual Machine (CVM)
-	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
-	// Tencent Cloud Elastic Kubernetes Service (EKS)
-	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
-	// Tencent Cloud Serverless Cloud Function (SCF)
-	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
-	// Alibaba Cloud
-	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
-	// Amazon Web Services
-	CloudProviderAWS = CloudProviderKey.String("aws")
-	// Microsoft Azure
-	CloudProviderAzure = CloudProviderKey.String("azure")
-	// Google Cloud Platform
-	CloudProviderGCP = CloudProviderKey.String("gcp")
-	// Heroku Platform as a Service
-	CloudProviderHeroku = CloudProviderKey.String("heroku")
-
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. 
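The cloud.* keys above are resource-level attributes, and enum members such as CloudProviderAWS are pre-built KeyValues rather than constructor functions. A sketch describing an EKS workload, assuming go.opentelemetry.io/otel/sdk/resource is available (the otel SDK is in this module's dependency graph) and reusing example values from the comments above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Enum values such as CloudProviderAWS are ready-made KeyValues;
	// free-form values go through constructors like CloudRegion.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
		semconv.CloudAccountID("111111111111"), // example account ID from the docs above
	)
	fmt.Println(res.Attributes())
}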
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
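The cloudevents.* constructors above produce plain string attributes, so they can be added after span creation as well as at start time. A runnable sketch with the example values from the comments; the v1.26.0 semconv import path is an assumption:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	tracer := otel.Tracer("cloudevents-example")
	_, span := tracer.Start(context.Background(), "process event")
	defer span.End()
	// Attributes can be attached after the span has started.
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}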
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
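In Go, the code.* attributes can be derived from the runtime rather than hard-coded. A sketch built around a hypothetical codeAttrs helper; note that runtime.FuncForPC yields the fully qualified name (e.g. "main.main"), whereas code.function is strictly only its rightmost segment:

package main

import (
	"fmt"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// codeAttrs is a hypothetical helper that derives code.* attributes
// for its caller from Go runtime information.
func codeAttrs() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1) // 1 = the caller of codeAttrs
	if !ok {
		return nil
	}
	fn := "unknown"
	if f := runtime.FuncForPC(pc); f != nil {
		fn = f.Name() // fully qualified; code.function is only the last segment
	}
	return []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeFunction(fn),
		semconv.CodeLineNumber(line),
	}
}

func main() {
	fmt.Println(codeAttrs())
}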
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. 
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. 
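Like the cloud.* group, the container.* keys are typically set once on the resource that describes the process. A sketch using the documented example values; the resource package and semconv version are assumptions:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"), // variadic string-slice helper
		semconv.ContainerRuntime("containerd"),
	)
	fmt.Println(res.Attributes())
}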
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes
-	DBSystemOtherSQL = DBSystemKey.String("other_sql")
-	// Microsoft SQL Server
-	DBSystemMSSQL = DBSystemKey.String("mssql")
-	// Microsoft SQL Server Compact
-	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
-	// MySQL
-	DBSystemMySQL = DBSystemKey.String("mysql")
-	// Oracle Database
-	DBSystemOracle = DBSystemKey.String("oracle")
-	// IBM DB2
-	DBSystemDB2 = DBSystemKey.String("db2")
-	// PostgreSQL
-	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
-	// Amazon Redshift
-	DBSystemRedshift = DBSystemKey.String("redshift")
-	// Apache Hive
-	DBSystemHive = DBSystemKey.String("hive")
-	// Cloudscape
-	DBSystemCloudscape = DBSystemKey.String("cloudscape")
-	// HyperSQL DataBase
-	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
-	// Progress Database
-	DBSystemProgress = DBSystemKey.String("progress")
-	// SAP MaxDB
-	DBSystemMaxDB = DBSystemKey.String("maxdb")
-	// SAP HANA
-	DBSystemHanaDB = DBSystemKey.String("hanadb")
-	// Ingres
-	DBSystemIngres = DBSystemKey.String("ingres")
-	// FirstSQL
-	DBSystemFirstSQL = DBSystemKey.String("firstsql")
-	// EnterpriseDB
-	DBSystemEDB = DBSystemKey.String("edb")
-	// InterSystems Caché
-	DBSystemCache = DBSystemKey.String("cache")
-	// Adabas (Adaptable Database System)
-	DBSystemAdabas = DBSystemKey.String("adabas")
-	// Firebird
-	DBSystemFirebird = DBSystemKey.String("firebird")
-	// Apache Derby
-	DBSystemDerby = DBSystemKey.String("derby")
-	// FileMaker
-	DBSystemFilemaker = DBSystemKey.String("filemaker")
-	// Informix
-	DBSystemInformix = DBSystemKey.String("informix")
-	// InstantDB
-	DBSystemInstantDB = DBSystemKey.String("instantdb")
-	// InterBase
-	DBSystemInterbase = DBSystemKey.String("interbase")
-	// MariaDB
-	DBSystemMariaDB = DBSystemKey.String("mariadb")
-	// Netezza
-	DBSystemNetezza = DBSystemKey.String("netezza")
-	// Pervasive PSQL
-	DBSystemPervasive = DBSystemKey.String("pervasive")
-	// PointBase
-	DBSystemPointbase = DBSystemKey.String("pointbase")
-	// SQLite
-	DBSystemSqlite = DBSystemKey.String("sqlite")
-	// Sybase
-	DBSystemSybase = DBSystemKey.String("sybase")
-	// Teradata
-	DBSystemTeradata = DBSystemKey.String("teradata")
-	// Vertica
-	DBSystemVertica = DBSystemKey.String("vertica")
-	// H2
-	DBSystemH2 = DBSystemKey.String("h2")
-	// ColdFusion IMQ
-	DBSystemColdfusion = DBSystemKey.String("coldfusion")
-	// Apache Cassandra
-	DBSystemCassandra = DBSystemKey.String("cassandra")
-	// Apache HBase
-	DBSystemHBase = DBSystemKey.String("hbase")
-	// MongoDB
-	DBSystemMongoDB = DBSystemKey.String("mongodb")
-	// Redis
-	DBSystemRedis = DBSystemKey.String("redis")
-	// Couchbase
-	DBSystemCouchbase = DBSystemKey.String("couchbase")
-	// CouchDB
-	DBSystemCouchDB = DBSystemKey.String("couchdb")
-	// Microsoft Azure Cosmos DB
-	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
-	// Amazon DynamoDB
-	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
-	// Neo4j
-	DBSystemNeo4j = DBSystemKey.String("neo4j")
-	// Apache Geode
-	DBSystemGeode = DBSystemKey.String("geode")
-	// Elasticsearch
-	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
-	// Memcached
-	DBSystemMemcached = DBSystemKey.String("memcached")
-	// CockroachDB
-	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
-	// OpenSearch
-	DBSystemOpensearch = DBSystemKey.String("opensearch")
-	// ClickHouse
-	DBSystemClickhouse = DBSystemKey.String("clickhouse")
-	// Cloud Spanner
-	DBSystemSpanner = DBSystemKey.String("spanner")
-	// Trino
-	DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. 
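Combining the general db.* attributes with a db.system enum member and the Cassandra-specific keys above yields a typical client span. A sketch for a Cassandra read; the span name and keyspace are illustrative, and the semconv path is assumed as before:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("cassandra-example")
	_, span := tracer.Start(context.Background(), "SELECT my_keyspace.users",
		trace.WithAttributes(
			semconv.DBSystemCassandra, // enum member from the db.system list above
			semconv.DBOperationName("SELECT"),
			semconv.DBCassandraConsistencyLevelQuorum, // enum member, not a constructor
			semconv.DBCassandraPageSize(5000),
			semconv.DBCassandraIdempotence(true),
		))
	defer span.End()
}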
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. 
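The Cosmos DB group mixes the same two shapes: exported enum KeyValues for connection mode and operation type, and typed constructors for the rest (request charge is the one float64-valued helper). A sketch with the example values from the comments above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
		semconv.DBCosmosDBConnectionModeGateway, // enum member
		semconv.DBCosmosDBOperationTypeQuery,    // enum member
		semconv.DBCosmosDBRequestCharge(46.18),  // float64-typed constructor
		semconv.DBCosmosDBStatusCode(200),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}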
-
-// This group defines attributes for Elasticsearch.
-const (
-	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
-	// identifier of an Elasticsearch cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
-	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
-	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.node.name" semantic conventions. It represents the
-	// human-readable identifier of the node/instance to which a request was
-	// routed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-0000000001'
-	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
-	return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
-	return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
-	// DeploymentEnvironmentKey is the attribute Key conforming to the
-	// "deployment.environment" semantic conventions. It represents the name of
-	// the [deployment
-	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
-	// deployment tier).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'staging', 'production'
-	// Note: `deployment.environment` does not affect the uniqueness
-	// constraints defined through
-	// the `service.namespace`, `service.name` and `service.instance.id`
-	// resource attributes.
-	// This implies that resources carrying the following attribute
-	// combinations MUST be
-	// considered to be identifying the same service:
-	//
-	// * `service.name=frontend`, `deployment.environment=production`
-	// * `service.name=frontend`, `deployment.environment=staging`.
-	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
-	return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represent an occurrence of a lifecycle transition on the
-// Android platform.
-const (
-	// AndroidStateKey is the attribute Key conforming to the "android.state"
-	// semantic conventions. It is deprecated; use the `device.app.lifecycle`
-	// event definition, including `android.state` as a payload field, instead.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The Android lifecycle states are defined in [Activity lifecycle
-	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
-	// and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
-func EnduserID(val string) attribute.KeyValue {
-	return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
-	return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
-	return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
-	// ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the operation ended
-	// with.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'timeout', 'java.net.UnknownHostException',
-	// 'server_certificate_invalid', '500'
-	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
-	// cardinality.
-	//
-	// When `error.type` is set to a type (e.g., an exception type), its
-	// canonical class name identifying the type within the artifact SHOULD be
-	// used.
-	//
-	// Instrumentations SHOULD document the list of errors they report.
-	//
-	// The cardinality of `error.type` within one instrumentation library
-	// SHOULD be low.
-	// Telemetry consumers that aggregate data from multiple instrumentation
-	// libraries and applications
-	// should be prepared for `error.type` to have high cardinality at query
-	// time when no
-	// additional filters are applied.
-	//
-	// If the operation has completed successfully, instrumentations SHOULD NOT
-	// set `error.type`.
-	//
-	// If a specific domain defines its own set of error identifiers (such as
-	// HTTP or gRPC status codes),
-	// it's RECOMMENDED to:
-	//
-	// * Use a domain-specific attribute
-	// * Set `error.type` to capture all errors, regardless of whether they are
-	// defined within the domain-specific set or not.
-	ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
-	// A fallback error value to be used when the instrumentation doesn't define a custom value
-	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
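A short sketch of the low-cardinality usage the note above prescribes, under the same assumed semconv v1.26.0 import path; the tracer name and error classification are illustrative:

package example

import (
	"context"
	"errors"
	"net"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordErrorType sets error.type with low cardinality: a stable identifier
// for known error classes, and the "_OTHER" fallback for everything else.
func recordErrorType(ctx context.Context, err error) {
	_, span := otel.Tracer("example").Start(ctx, "operation")
	defer span.End()
	if err == nil {
		return // success: error.type SHOULD NOT be set
	}
	var netErr net.Error
	switch {
	case errors.As(err, &netErr) && netErr.Timeout():
		span.SetAttributes(semconv.ErrorTypeKey.String("timeout"))
	default:
		span.SetAttributes(semconv.ErrorTypeOther)
	}
}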
-
-// Attributes for Events represented using Log Records.
-const (
-	// EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It identifies the class / type of event.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
-	// Note: Event names are subject to the same rules as [attribute
-	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
-	// Notably, event names are namespaced to avoid collisions and provide a
-	// clean separation of semantics for events in separate domains like
-	// browser, mobile, and kubernetes.
-	EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of event.
-func EventName(val string) attribute.KeyValue {
-	return EventNameKey.String(val)
-}
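A minimal illustration, under the same assumed import path, of building the event.name attribute an event-style log record would carry; the helper is hypothetical:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// clickEventAttrs builds the attribute set for an event-style record;
// "browser.mouse.click" is one of the namespaced names cited above.
func clickEventAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.EventName("browser.mouse.click"),
	}
}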
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
-	// ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Note: An exception is considered to have escaped (or left) the scope of
-	// a span,
-	// if that span is ended while the exception is still logically "in
-	// flight".
-	// This may be actually "in flight" in some languages (e.g. if the
-	// exception
-	// is passed to a Context manager's `__exit__` method in Python) but will
-	// usually be caught at the point of recording the exception in most
-	// languages.
-	//
-	// It is usually not possible to determine at the point where an exception
-	// is thrown
-	// whether it will escape the scope of a span.
-	// However, it is trivial to know that an exception
-	// will escape, if one checks for an active exception just before ending
-	// the span,
-	// as done in the [example for recording span
-	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
-	//
-	// It follows that an exception may still escape the scope of the span
-	// even if the `exception.escaped` attribute was not set or set to false,
-	// since the event might have been recorded at a time where it was not
-	// clear whether the exception will escape.
-	ExceptionEscapedKey = attribute.Key("exception.escaped")
-
-	// ExceptionMessageKey is the attribute Key conforming to the
-	// "exception.message" semantic conventions. It represents the exception
-	// message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Division by zero', "Can't convert 'int' object to str
-	// implicitly"
-	ExceptionMessageKey = attribute.Key("exception.message")
-
-	// ExceptionStacktraceKey is the attribute Key conforming to the
-	// "exception.stacktrace" semantic conventions. It represents a stacktrace
-	// as a string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
-	// exception\\n at '
-	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
-	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
-	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
-	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
-	// semantic conventions. It represents the type of the exception (its
-	// fully-qualified class name, if applicable). The dynamic type of the
-	// exception should be preferred over the static type in languages that
-	// support it.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'java.net.ConnectException', 'OSError'
-	ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to true if the
-// exception event is recorded at a point where it is known that the exception
-// is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
-	return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
-	return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
-	return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
-	return ExceptionTypeKey.String(val)
-}
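The escape-detection pattern the note above describes, sketched under the same assumed import path; doWork and its callback are hypothetical:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// doWork records an exception just before ending the span; because the error
// is still "in flight" at that point, exception.escaped is set to true.
// RecordError itself fills in exception.type and exception.message.
func doWork(ctx context.Context, work func() error) (err error) {
	_, span := otel.Tracer("example").Start(ctx, "do-work")
	defer func() {
		if err != nil {
			span.RecordError(err, trace.WithAttributes(
				semconv.ExceptionEscaped(true),
			))
		}
		span.End()
	}()
	return work()
}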
-
-// FaaS attributes
-const (
-	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
-	// semantic conventions. It represents a boolean that is true if the
-	// serverless function is executed for the first time (aka cold-start).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSColdstartKey = attribute.Key("faas.coldstart")
-
-	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
-	// conventions. It represents a string containing the schedule period as
-	// [Cron
-	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0/5 * * * ? *'
-	FaaSCronKey = attribute.Key("faas.cron")
-
-	// FaaSDocumentCollectionKey is the attribute Key conforming to the
-	// "faas.document.collection" semantic conventions. It represents the name
-	// of the source on which the triggering operation was performed. For
-	// example, in Cloud Storage or S3 this corresponds to the bucket name, and
-	// in Cosmos DB to the database name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myBucketName', 'myDBName'
-	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
-	// FaaSDocumentNameKey is the attribute Key conforming to the
-	// "faas.document.name" semantic conventions. It represents the document
-	// name/table subjected to the operation. For example, in Cloud Storage or
-	// S3 this is the name of the file, and in Cosmos DB the table name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myFile.txt', 'myTableName'
-	FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
-	// FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It describes the type of
-	// the operation that was performed on the data.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
-	// FaaSDocumentTimeKey is the attribute Key conforming to the
-	// "faas.document.time" semantic conventions. It represents a string
-	// containing the time when the data was accessed in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
-	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
-	// semantic conventions. It represents the execution environment ID as a
-	// string that will potentially be reused for other invocations to the
-	// same function/function version.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
-	// Note: * **AWS Lambda:** Use the (full) log stream name.
-	FaaSInstanceKey = attribute.Key("faas.instance")
-
-	// FaaSInvocationIDKey is the attribute Key conforming to the
-	// "faas.invocation_id" semantic conventions. It represents the invocation
-	// ID of the current function invocation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
-	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
-	// FaaSInvokedNameKey is the attribute Key conforming to the
-	// "faas.invoked_name" semantic conventions. It represents the name of the
-	// invoked function.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-function'
-	// Note: SHOULD be equal to the `faas.name` resource attribute of the
-	// invoked function.
-	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
-	// FaaSInvokedProviderKey is the attribute Key conforming to the
-	// "faas.invoked_provider" semantic conventions. It represents the cloud
-	// provider of the invoked function.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
-	// invoked function.
-	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
-	// FaaSInvokedRegionKey is the attribute Key conforming to the
-	// "faas.invoked_region" semantic conventions. It represents the cloud
-	// region of the invoked function.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'eu-central-1'
-	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
-	// invoked function.
-	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
-	// FaaSMaxMemoryKey is the attribute Key conforming to the
-	// "faas.max_memory" semantic conventions. It represents the amount of
-	// memory available to the serverless function converted to Bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 134217728
-	// Note: It's recommended to set this attribute since e.g. too little
-	// memory can easily stop a Java AWS Lambda function from working
-	// correctly. On AWS Lambda, the environment variable
-	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
-	// be multiplied by 1,048,576).
-	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
-	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
-	// conventions. It represents the name of the single function that this
-	// runtime instance executes.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
-	// Note: This is the name of the function as configured/deployed on the
-	// FaaS
-	// platform and is usually different from the name of the callback
-	// function (which may be stored in the
-	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
-	// span attributes).
-	//
-	// For some cloud providers, the above definition is ambiguous. The
-	// following
-	// definition of function name MUST be used for this attribute
-	// (and consequently the span name) for the listed cloud
-	// providers/products:
-	//
-	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
-	// followed by a forward slash followed by the function name (this form
-	// can also be seen in the resource JSON for the function).
-	// This means that a span attribute MUST be used, as an Azure function
-	// app can host multiple functions that would usually share
-	// a TracerProvider (see also the `cloud.resource_id` attribute).
-	FaaSNameKey = attribute.Key("faas.name")
-
-	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
-	// conventions. It represents a string containing the function invocation
-	// time in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSTimeKey = attribute.Key("faas.time")
-
-	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
-	// semantic conventions. It represents the type of the trigger which caused
-	// this function invocation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSTriggerKey = attribute.Key("faas.trigger")
-
-	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
-	// semantic conventions. It represents the immutable version of the
-	// function being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '26', 'pinkfroid-00002'
-	// Note: Depending on the cloud provider and platform, use:
-	//
-	// * **AWS Lambda:** The [function
-	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
-	// (an integer represented as a decimal string).
-	// * **Google Cloud Run (Services):** The
-	// [revision](https://cloud.google.com/run/docs/managing/revisions)
-	// (i.e., the function name plus the revision suffix).
-	// * **Google Cloud Functions:** The value of the
-	// [`K_REVISION` environment
-	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
-	// * **Azure Functions:** Not applicable.
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution
-// environment ID as a string that will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
-	return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
-	return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
-	return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
-	return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
-	return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
-	return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
-	return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
-	return FaaSVersionKey.String(val)
-}
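A hedged sketch of a traced invocation using the trigger, coldstart, and invocation-ID attributes above, under the same assumed import path; the handler and its state are hypothetical:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

var coldstart = true // true only for the first invocation of this instance

// handle traces one hypothetical HTTP-triggered function invocation.
func handle(ctx context.Context, invocationID string) {
	_, span := otel.Tracer("example").Start(ctx, "my-function",
		trace.WithAttributes(
			semconv.FaaSTriggerHTTP,
			semconv.FaaSColdstart(coldstart),
			semconv.FaaSInvocationID(invocationID),
		))
	defer span.End()
	coldstart = false
}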
-
-// Attributes for Feature Flags.
-const (
-	// FeatureFlagKeyKey is the attribute Key conforming to the
-	// "feature_flag.key" semantic conventions. It represents the unique
-	// identifier of the feature flag.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'logo-color'
-	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
-	// FeatureFlagProviderNameKey is the attribute Key conforming to the
-	// "feature_flag.provider_name" semantic conventions. It represents the
-	// name of the service provider that performs the flag evaluation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Flag Manager'
-	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
-	// FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
-	// identifier for a value. If one is unavailable, a stringified version of
-	// the value can be used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'red', 'true', 'on'
-	// Note: A semantic identifier, commonly referred to as a variant, provides
-	// a means
-	// for referring to a value without including the value itself. This can
-	// provide additional context for understanding the meaning behind a value.
-	// For example, the variant `red` may be used for the value `#c05543`.
-	//
-	// A stringified version of the value can be used in situations where a
-	// semantic identifier is unavailable. String representation of the value
-	// should be determined by the implementer.
-	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
-	return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
-	return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
-// identifier for a value. If one is unavailable, a stringified version of the
-// value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
-	return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
-	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
-	// semantic conventions. It represents the directory where the file is
-	// located. It should include the drive letter, when appropriate.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/home/user', 'C:\\Program Files\\MyApp'
-	FileDirectoryKey = attribute.Key("file.directory")
-
-	// FileExtensionKey is the attribute Key conforming to the "file.extension"
-	// semantic conventions. It represents the file extension, excluding the
-	// leading dot.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'png', 'gz'
-	// Note: When the file name has multiple extensions (example.tar.gz), only
-	// the last one should be captured ("gz", not "tar.gz").
-	FileExtensionKey = attribute.Key("file.extension")
-
-	// FileNameKey is the attribute Key conforming to the "file.name" semantic
-	// conventions. It represents the name of the file including the extension,
-	// without the directory.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'example.png'
-	FileNameKey = attribute.Key("file.name")
-
-	// FilePathKey is the attribute Key conforming to the "file.path" semantic
-	// conventions. It represents the full path to the file, including the file
-	// name. It should include the drive letter, when appropriate.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/home/alice/example.png', 'C:\\Program
-	// Files\\MyApp\\myapp.exe'
-	FilePathKey = attribute.Key("file.path")
-
-	// FileSizeKey is the attribute Key conforming to the "file.size" semantic
-	// conventions. It represents the file size in bytes.
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
-func GenAiResponseID(val string) attribute.KeyValue {
-	return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
-	return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
-	return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
-	return GenAiUsagePromptTokensKey.Int(val)
-}
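A sketch of a single LLM call annotated with the request/response attributes above, under the same assumed semconv v1.26.0 import path; the model names and token counts mirror the documented examples and are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// chat traces one hypothetical chat-completion request.
func chat(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "chat gpt-4",
		trace.WithAttributes(
			semconv.GenAiSystemOpenai,
			semconv.GenAiRequestModel("gpt-4"),
			semconv.GenAiRequestTemperature(0.0),
		))
	defer span.End()

	// The values below would come from the provider's response.
	span.SetAttributes(
		semconv.GenAiResponseModel("gpt-4-0613"),
		semconv.GenAiUsagePromptTokens(100),
		semconv.GenAiUsageCompletionTokens(180),
		semconv.GenAiResponseFinishReasons("stop"),
	)
}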
-// Attributes for Heroku applications.
-const (
-	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
-	// semantic conventions. It represents the unique identifier for the
-	// application
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
-	HerokuAppIDKey = attribute.Key("heroku.app.id")
-
-	// HerokuReleaseCommitKey is the attribute Key conforming to the
-	// "heroku.release.commit" semantic conventions. It represents the commit
-	// hash for the current release
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
-	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
-	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
-	// "heroku.release.creation_timestamp" semantic conventions. It represents
-	// the time and date the release was created
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2022-10-23T18:00:42Z'
-	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
-	return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
-	return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
-	return HerokuReleaseCreationTimestampKey.String(val)
-}
-
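The Heroku attributes are resource-level, so they would normally be set once on the SDK resource rather than per span. A sketch, reusing the documented example values and the standard sdk/resource package:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// herokuResource builds a resource describing a Heroku release.
func herokuResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
		semconv.HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
		semconv.HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
	)
}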
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches or disk arrays.
-const (
-	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
-	// conventions. It represents the CPU architecture the host system is
-	// running on.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	HostArchKey = attribute.Key("host.arch")
-
-	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
-	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
-	// of level 2 memory cache available to the processor (in Bytes).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12288000
-	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
-	// HostCPUFamilyKey is the attribute Key conforming to the
-	// "host.cpu.family" semantic conventions. It represents the family or
-	// generation of the CPU.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', 'PA-RISC 1.1e'
-	HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
-	// HostCPUModelIDKey is the attribute Key conforming to the
-	// "host.cpu.model.id" semantic conventions. It represents the model
-	// identifier. It provides more granular information about the CPU,
-	// distinguishing it from other CPUs within the same family.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', '9000/778/B180L'
-	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
-	// HostCPUModelNameKey is the attribute Key conforming to the
-	// "host.cpu.model.name" semantic conventions. It represents the model
-	// designation of the processor.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
-	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
-	// HostCPUSteppingKey is the attribute Key conforming to the
-	// "host.cpu.stepping" semantic conventions. It represents the stepping or
-	// core revisions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1', 'r1p1'
-	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
-	// HostCPUVendorIDKey is the attribute Key conforming to the
-	// "host.cpu.vendor.id" semantic conventions. It represents the processor
-	// manufacturer identifier. A maximum 12-character string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'GenuineIntel'
-	// Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
-	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
-	// order results in a 12-character string.
-	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
-	// HostIDKey is the attribute Key conforming to the "host.id" semantic
-	// conventions. It represents the unique host ID. For Cloud, this must be
-	// the instance_id assigned by the cloud provider. For non-containerized
-	// systems, this should be the `machine-id`. See the table below for the
-	// sources to use to determine the `machine-id` based on operating system.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
-	HostIDKey = attribute.Key("host.id")
-
-	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
-	// semantic conventions. It represents the VM image ID or host OS image ID.
-	// For Cloud, this value is from the provider.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ami-07b06b442921831e5'
-	HostImageIDKey = attribute.Key("host.image.id")
-
-	// HostImageNameKey is the attribute Key conforming to the
-	// "host.image.name" semantic conventions. It represents the name of the VM
-	// image or OS install the host was instantiated from.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
-	HostImageNameKey = attribute.Key("host.image.name")
-
-	// HostImageVersionKey is the attribute Key conforming to the
-	// "host.image.version" semantic conventions. It represents the version
-	// string of the VM image or host OS as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0.1'
-	HostImageVersionKey = attribute.Key("host.image.version")
-
-	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
-	// conventions. It represents the available IP addresses of the host,
-	// excluding loopback interfaces.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
-	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
-	// addresses MUST be specified in the [RFC
-	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
-	HostIPKey = attribute.Key("host.ip")
-
-	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
-	// conventions. It represents the available MAC addresses of the host,
-	// excluding loopback interfaces.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
-	// Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
-	// form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
-	// as hyphen-separated octets in uppercase hexadecimal form from most to
-	// least significant.
-	HostMacKey = attribute.Key("host.mac")
-
-	// HostNameKey is the attribute Key conforming to the "host.name" semantic
-	// conventions. It represents the name of the host. On Unix systems, it may
-	// contain what the hostname command returns, or the fully qualified
-	// hostname, or another name specified by the user.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-test'
-	HostNameKey = attribute.Key("host.name")
-
-	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
-	// conventions. It represents the type of host. For Cloud, this must be the
-	// machine type.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'n1-standard-1'
-	HostTypeKey = attribute.Key("host.type")
-)
-
-var (
-	// AMD64
-	HostArchAMD64 = HostArchKey.String("amd64")
-	// ARM32
-	HostArchARM32 = HostArchKey.String("arm32")
-	// ARM64
-	HostArchARM64 = HostArchKey.String("arm64")
-	// Itanium
-	HostArchIA64 = HostArchKey.String("ia64")
-	// 32-bit PowerPC
-	HostArchPPC32 = HostArchKey.String("ppc32")
-	// 64-bit PowerPC
-	HostArchPPC64 = HostArchKey.String("ppc64")
-	// IBM z/Architecture
-	HostArchS390x = HostArchKey.String("s390x")
-	// 32-bit x86
-	HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
-	return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
-	return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
-	return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
-	return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
-	return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
-	return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
-	return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
-	return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
-	return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
-	return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
-	return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
-	return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
-	return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
-	return HostTypeKey.String(val)
-}
-
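Host attributes are likewise resource-level; a sketch combining the enum member and the helpers above, with values taken from the documented examples:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// hostResource builds a resource describing the host the process runs on.
func hostResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostArchAMD64,
		semconv.HostName("opentelemetry-test"),
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
	)
}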
-// Semantic convention attributes in the HTTP namespace.
-const (
-	// HTTPConnectionStateKey is the attribute Key conforming to the
-	// "http.connection.state" semantic conventions. It represents the state of
-	// the HTTP connection in the HTTP connection pool.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'active', 'idle'
-	HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
-	// HTTPRequestBodySizeKey is the attribute Key conforming to the
-	// "http.request.body.size" semantic conventions. It represents the size of
-	// the request payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
-	// HTTPRequestMethodKey is the attribute Key conforming to the
-	// "http.request.method" semantic conventions. It represents the HTTP
-	// request method.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GET', 'POST', 'HEAD'
-	// Note: HTTP request method value SHOULD be "known" to the
-	// instrumentation.
-	// By default, this convention defines "known" methods as the ones listed
-	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
-	// and the PATCH method defined in
-	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
-	//
-	// If the HTTP request method is not known to instrumentation, it MUST set
-	// the `http.request.method` attribute to `_OTHER`.
-	//
-	// If the HTTP instrumentation could end up converting valid HTTP request
-	// methods to `_OTHER`, then it MUST provide a way to override
-	// the list of known HTTP methods. If this override is done via environment
-	// variable, then the environment variable MUST be named
-	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
-	// list of case-sensitive known HTTP methods
-	// (this list MUST be a full override of the default known methods; it is
-	// not a list of known methods in addition to the defaults).
-	//
-	// HTTP method names are case-sensitive and the `http.request.method`
-	// attribute value MUST match a known HTTP method name exactly.
-	// Instrumentations for specific web frameworks that consider HTTP methods
-	// to be case-insensitive SHOULD populate a canonical equivalent.
-	// Tracing instrumentations that do so MUST also set
-	// `http.request.method_original` to the original value.
-	HTTPRequestMethodKey = attribute.Key("http.request.method")
-
-	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
-	// "http.request.method_original" semantic conventions. It represents the
-	// original HTTP method sent by the client in the request line.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GeT', 'ACL', 'foo'
-	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
-	// HTTPRequestResendCountKey is the attribute Key conforming to the
-	// "http.request.resend_count" semantic conventions. It represents the
-	// ordinal number of request resending attempt (for any reason, including
-	// redirects).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 3
-	// Note: The resend count SHOULD be updated each time an HTTP request gets
-	// resent by the client, regardless of the cause of the resending
-	// (e.g. redirection, authorization failure, 503 Service Unavailable,
-	// network issues, or any other).
-	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
-	// HTTPRequestSizeKey is the attribute Key conforming to the
-	// "http.request.size" semantic conventions. It represents the total size
-	// of the request in bytes. This should be the total number of bytes sent
-	// over the wire, including the request line (HTTP/1.1), framing (HTTP/2
-	// and HTTP/3), headers, and request body if any.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1437
-	HTTPRequestSizeKey = attribute.Key("http.request.size")
-
-	// HTTPResponseBodySizeKey is the attribute Key conforming to the
-	// "http.response.body.size" semantic conventions. It represents the size
-	// of the response payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
-	// HTTPResponseSizeKey is the attribute Key conforming to the
-	// "http.response.size" semantic conventions. It represents the total size
-	// of the response in bytes. This should be the total number of bytes sent
-	// over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
-	// HTTP/3), headers, and response body and trailers if any.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1437
-	HTTPResponseSizeKey = attribute.Key("http.response.size")
-
-	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
-	// "http.response.status_code" semantic conventions. It represents the
-	// [HTTP response status
-	// code](https://tools.ietf.org/html/rfc7231#section-6).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 200
-	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
-	// HTTPRouteKey is the attribute Key conforming to the "http.route"
-	// semantic conventions. It represents the matched route, that is, the path
-	// template in the format used by the respective server framework.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
-	// Note: MUST NOT be populated when this is not supported by the HTTP
-	// server framework as the route attribute should have low-cardinality and
-	// the URI path can NOT substitute it.
-	// SHOULD include the [application
-	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
-	HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
-	// active state
-	HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
-	// idle state
-	HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
-	// CONNECT method
-	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
-	// DELETE method
-	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
-	// GET method
-	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
-	// HEAD method
-	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
-	// OPTIONS method
-	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
-	// PATCH method
-	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
-	// POST method
-	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
-	// PUT method
-	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
-	// TRACE method
-	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
-	// Any HTTP method that the instrumentation has no prior knowledge of
-	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
-	return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
-	return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
-	return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
-	return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
-	return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
-	return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
-	return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
-	return HTTPRouteKey.String(val)
-}
-
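A server-side sketch of the HTTP attributes above; in practice instrumentation such as otelhttp sets these for you, so the route, status, and size values here are just the documented examples:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func recordHTTPServer(ctx context.Context) {
	// Span name follows the "{method} {route}" convention for server spans.
	_, span := otel.Tracer("example/http").Start(ctx, "GET /users/:userID?")
	defer span.End()

	span.SetAttributes(
		semconv.HTTPRequestMethodGet,
		semconv.HTTPRoute("/users/:userID?"),
		semconv.HTTPResponseStatusCode(200),
		semconv.HTTPResponseBodySize(3495),
	)
}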
-// Java Virtual Machine related attributes.
-const (
-	// JvmBufferPoolNameKey is the attribute Key conforming to the
-	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
-	// the buffer pool.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mapped', 'direct'
-	// Note: Pool names are generally obtained via
-	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
-	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
-	// JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
-	// semantic conventions. It represents the name of the garbage collector
-	// action.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'end of minor GC', 'end of major GC'
-	// Note: Garbage collector action is generally obtained via
-	// [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
-	JvmGcActionKey = attribute.Key("jvm.gc.action")
-
-	// JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
-	// semantic conventions. It represents the name of the garbage collector.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'G1 Young Generation', 'G1 Old Generation'
-	// Note: Garbage collector name is generally obtained via
-	// [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
-	JvmGcNameKey = attribute.Key("jvm.gc.name")
-
-	// JvmMemoryPoolNameKey is the attribute Key conforming to the
-	// "jvm.memory.pool.name" semantic conventions. It represents the name of
-	// the memory pool.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
-	// Note: Pool names are generally obtained via
-	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
-	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
-	// JvmMemoryTypeKey is the attribute Key conforming to the
-	// "jvm.memory.type" semantic conventions. It represents the type of
-	// memory.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'heap', 'non_heap'
-	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
-	// JvmThreadDaemonKey is the attribute Key conforming to the
-	// "jvm.thread.daemon" semantic conventions. It represents whether the
-	// thread is a daemon thread or not.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: stable
-	JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
-	// JvmThreadStateKey is the attribute Key conforming to the
-	// "jvm.thread.state" semantic conventions. It represents the state of the
-	// thread.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'runnable', 'blocked'
-	JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
-	// Heap memory
-	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
-	// Non-heap memory
-	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
-	// A thread that has not yet started is in this state
-	JvmThreadStateNew = JvmThreadStateKey.String("new")
-	// A thread executing in the Java virtual machine is in this state
-	JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
-	// A thread that is blocked waiting for a monitor lock is in this state
-	JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
-	// A thread that is waiting indefinitely for another thread to perform a particular action is in this state
-	JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
-	// A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
-	JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
-	// A thread that has exited is in this state
-	JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
-	return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
-	return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
-	return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
-	return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents whether the thread
-// is a daemon thread or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
-	return JvmThreadDaemonKey.Bool(val)
-}
-
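The JVM attributes are primarily metric attributes. A sketch qualifying a hypothetical heap-usage measurement via the metric API's WithAttributes option (the meter name, instrument name, and pool value are illustrative; Int64Gauge assumes a recent otel metric API, which the go.mod in this PR pins well above the version that introduced it):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func recordHeapUsage(ctx context.Context, usedBytes int64) error {
	gauge, err := otel.Meter("example/jvm").Int64Gauge("jvm.memory.used")
	if err != nil {
		return err
	}
	// Tag the data point with the memory type and pool name.
	gauge.Record(ctx, usedBytes, metric.WithAttributes(
		semconv.JvmMemoryTypeHeap,
		semconv.JvmMemoryPoolName("G1 Eden Space"),
	))
	return nil
}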
-// Kubernetes resource attributes.
-const (
-	// K8SClusterNameKey is the attribute Key conforming to the
-	// "k8s.cluster.name" semantic conventions. It represents the name of the
-	// cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-cluster'
-	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
-	// K8SClusterUIDKey is the attribute Key conforming to the
-	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
-	// the cluster, set to the UID of the `kube-system` namespace.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
-	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
-	// ever added, we will recommend collecting the `k8s.cluster.uid` through
-	// the official APIs. In the meantime, we are able to use the `uid` of the
-	// `kube-system` namespace as a proxy for cluster ID. Read on for the
-	// rationale.
-	//
-	// Every object created in a K8S cluster is assigned a distinct UID. The
-	// `kube-system` namespace is used by Kubernetes itself and will exist
-	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
-	// namespace is a reasonable proxy for the K8S ClusterID as it will only
-	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
-	// UUIDs as standardized by
-	// [ISO/IEC 9834-8 and ITU-T
-	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
-	// which states:
-	//
-	// > If generated according to one of the mechanisms defined in Rec.
-	// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
-	// different from all other UUIDs generated before 3603 A.D., or is
-	// extremely likely to be different (depending on the mechanism chosen).
-	//
-	// Therefore, UIDs between clusters should be extremely unlikely to
-	// conflict.
-	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
-	// K8SContainerNameKey is the attribute Key conforming to the
-	// "k8s.container.name" semantic conventions. It represents the name of the
-	// Container from Pod specification, must be unique within a Pod. Container
-	// runtime usually uses a different globally unique name (`container.name`).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'redis'
-	K8SContainerNameKey = attribute.Key("k8s.container.name")
-
-	// K8SContainerRestartCountKey is the attribute Key conforming to the
-	// "k8s.container.restart_count" semantic conventions. It represents the
-	// number of times the container was restarted. This attribute can be used
-	// to identify a particular container (running or stopped) within a
-	// container spec.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
-	// K8SContainerStatusLastTerminatedReasonKey is the attribute Key
-	// conforming to the "k8s.container.status.last_terminated_reason" semantic
-	// conventions. It represents the last terminated reason of the Container.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Evicted', 'Error'
-	K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
-	// K8SCronJobNameKey is the attribute Key conforming to the
-	// "k8s.cronjob.name" semantic conventions. It represents the name of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
-	// K8SCronJobUIDKey is the attribute Key conforming to the
-	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
-	// K8SDaemonSetNameKey is the attribute Key conforming to the
-	// "k8s.daemonset.name" semantic conventions. It represents the name of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
-	// K8SDaemonSetUIDKey is the attribute Key conforming to the
-	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
-	// K8SDeploymentNameKey is the attribute Key conforming to the
-	// "k8s.deployment.name" semantic conventions. It represents the name of
-	// the Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
-	// K8SDeploymentUIDKey is the attribute Key conforming to the
-	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-	// Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
-	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
-	// semantic conventions. It represents the name of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SJobNameKey = attribute.Key("k8s.job.name")
-
-	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
-	// semantic conventions. It represents the UID of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
-	// K8SNamespaceNameKey is the attribute Key conforming to the
-	// "k8s.namespace.name" semantic conventions. It represents the name of the
-	// namespace that the pod is running in.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'default'
-	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
-	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
-	// semantic conventions. It represents the name of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'node-1'
-	K8SNodeNameKey = attribute.Key("k8s.node.name")
-
-	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
-	// semantic conventions. It represents the UID of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
-	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
-	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
-	// semantic conventions. It represents the name of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-pod-autoconf'
-	K8SPodNameKey = attribute.Key("k8s.pod.name")
-
-	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
-	// semantic conventions. It represents the UID of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
-	// K8SReplicaSetNameKey is the attribute Key conforming to the
-	// "k8s.replicaset.name" semantic conventions. It represents the name of
-	// the ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
-	// K8SReplicaSetUIDKey is the attribute Key conforming to the
-	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-	// ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
-	// K8SStatefulSetNameKey is the attribute Key conforming to the
-	// "k8s.statefulset.name" semantic conventions. It represents the name of
-	// the StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
-	// K8SStatefulSetUIDKey is the attribute Key conforming to the
-	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-	// StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
-	return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
-	return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses a different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
-	return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
-	return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
-	return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
-	return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
-	return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
-	return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
-	return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
-	return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
-	return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
-	return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
-	return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
-	return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
-	return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
-	return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
-	return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
-	return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
-	return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
-	return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
-	return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
-	return K8SStatefulSetUIDKey.String(val)
-}
-
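A sketch of the Kubernetes attributes on a resource; in practice these usually come from the Downward API or a collector processor, so the literal values here are only the documented examples:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// k8sResource builds a resource describing the workload's place in a cluster.
func k8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
	)
}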
-// Log attributes
-const (
-	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
-	// semantic conventions. It represents the stream associated with the log.
-	// See below for a list of well-known values.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
-	// Logs from stdout stream
-	LogIostreamStdout = LogIostreamKey.String("stdout")
-	// Logs from stderr stream
-	LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
-	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
-	// semantic conventions. It represents the basename of the file.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'audit.log'
-	LogFileNameKey = attribute.Key("log.file.name")
-
-	// LogFileNameResolvedKey is the attribute Key conforming to the
-	// "log.file.name_resolved" semantic conventions. It represents the
-	// basename of the file, with symlinks resolved.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'uuid.log'
-	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
-	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
-	// semantic conventions. It represents the full path to the file.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/var/log/mysql/audit.log'
-	LogFilePathKey = attribute.Key("log.file.path")
-
-	// LogFilePathResolvedKey is the attribute Key conforming to the
-	// "log.file.path_resolved" semantic conventions. It represents the full
-	// path to the file, with symlinks resolved.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/var/lib/docker/uuid.log'
-	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
-	return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
-	return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
-	return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
-	return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
-	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
-	// semantic conventions. It represents a unique identifier for the Log
-	// Record.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
-	// Note: If an id is provided, other log records with the same id will be
-	// considered duplicates and can be removed safely. This means that two
-	// distinguishable log records MUST have different values.
-	// The id MAY be a [Universally Unique Lexicographically Sortable
-	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
-	// (e.g. UUID) may be used as needed.
-	LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
-	return LogRecordUIDKey.String(val)
-}
-
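A sketch of the log.* attributes assembled as a plain attribute slice, e.g. for a log record emitted through a log bridge; the file path and ULID are the documented examples:

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// logFileAttrs returns attributes describing where a log record came from.
func logFileAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.LogIostreamStdout,
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	}
}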
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
-	// MessagingBatchMessageCountKey is the attribute Key conforming to the
-	// "messaging.batch.message_count" semantic conventions. It represents the
-	// number of messages sent, received, or processed in the scope of the
-	// batching operation.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 1, 2
-	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
-	// spans that operate with a single message. When a messaging client
-	// library supports both batch and single-message API for the same
-	// operation, instrumentations SHOULD use `messaging.batch.message_count`
-	// for batching APIs and SHOULD NOT use it for single-message APIs.
-	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
-	// MessagingClientIDKey is the attribute Key conforming to the
-	// "messaging.client.id" semantic conventions. It represents a unique
-	// identifier for the client that consumes or produces a message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'client-5', 'myhost@8742@s8083jm'
-	MessagingClientIDKey = attribute.Key("messaging.client.id")
-
-	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
-	// "messaging.destination.anonymous" semantic conventions. It represents a
-	// boolean that is true if the message destination is anonymous (could be
-	// unnamed or have an auto-generated name).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
-	// MessagingDestinationNameKey is the attribute Key conforming to the
-	// "messaging.destination.name" semantic conventions. It represents the
-	// message destination name
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyQueue', 'MyTopic'
-	// Note: Destination name SHOULD uniquely identify a specific queue, topic
-	// or other entity within the broker. If
-	// the broker doesn't have such a notion, the destination name SHOULD
-	// uniquely identify the broker.
-	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
-	// MessagingDestinationPartitionIDKey is the attribute Key conforming to
-	// the "messaging.destination.partition.id" semantic conventions. It
-	// represents the identifier of the partition messages are sent to or
-	// received from, unique within the `messaging.destination.name`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1'
-	MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
-	// MessagingDestinationTemplateKey is the attribute Key conforming to the
-	// "messaging.destination.template" semantic conventions. It represents the
-	// low cardinality representation of the messaging destination name
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/customers/{customerID}'
-	// Note: Destination names could be constructed from templates. An example
-	// would be a destination name involving a user name or product id.
-	// Although the destination name in this case is of high cardinality, the
-	// underlying template is of low cardinality and can be effectively used
-	// for grouping and aggregation.
-	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
-	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
-	// "messaging.destination.temporary" semantic conventions. It represents a
-	// boolean that is true if the message destination is temporary and might
-	// not exist anymore after messages are processed.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
-	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
-	// to the "messaging.destination_publish.anonymous" semantic conventions.
-	// It represents a boolean that is true if the publish message destination
-	// is anonymous (could be unnamed or have an auto-generated name).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
-	// MessagingDestinationPublishNameKey is the attribute Key conforming to
-	// the "messaging.destination_publish.name" semantic conventions. It
-	// represents the name of the original destination the message was
-	// published to
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyQueue', 'MyTopic'
-	// Note: The name SHOULD uniquely identify a specific queue, topic, or
-	// other entity within the broker. If
-	// the broker doesn't have such a notion, the original destination name
-	// SHOULD uniquely identify the broker.
-	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
-	// MessagingMessageBodySizeKey is the attribute Key conforming to the
-	// "messaging.message.body.size" semantic conventions. It represents the
-	// size of the message body in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1439
-	// Note: This can refer to either the compressed or the uncompressed body
-	// size. If both sizes are known, the uncompressed
-	// body size should be used.
-	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
-	// MessagingMessageConversationIDKey is the attribute Key conforming to the
-	// "messaging.message.conversation_id" semantic conventions. It represents
-	// the conversation ID identifying the conversation to which the message
-	// belongs, represented as a string. Sometimes called "Correlation ID".
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyConversationID'
-	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
-	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
-	// "messaging.message.envelope.size" semantic conventions. It represents
-	// the size of the message body and metadata in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2738
-	// Note: This can refer to either the compressed or the uncompressed size.
-	// If both sizes are known, the uncompressed
-	// size should be used.
-	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
-	// MessagingMessageIDKey is the attribute Key conforming to the
-	// "messaging.message.id" semantic conventions. It represents a value used
-	// by the messaging system as an identifier for the message, represented as
-	// a string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
-	MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
-	// MessagingOperationNameKey is the attribute Key conforming to the
-	// "messaging.operation.name" semantic conventions. It represents the
-	// system-specific name of the messaging operation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ack', 'nack', 'send'
-	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
-	// MessagingOperationTypeKey is the attribute Key conforming to the
-	// "messaging.operation.type" semantic conventions. It represents a string
-	// identifying the type of the messaging operation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: If a custom value is used, it MUST be of low cardinality.
-	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka, which are used for grouping alike messages to - // ensure they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, its string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka, which are used for grouping alike messages to ensure -// they're processed on the same partition.
They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// This group describes attributes specific to RabbitMQ. -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the rabbitMQ message delivery tag - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic -// conventions. It represents the rabbitMQ message delivery tag -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// This group describes attributes specific to RocketMQ. -const ( - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for a delay message, - // which determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds at - // which the delay message is expected to be delivered to the consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the message group, which is essential for FIFO messages. Messages that - // belong to the same message group are always processed one by one within - // the same consumer group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of the message, another way to mark the message besides the - // message ID. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of the message besides the topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources; resources in different namespaces are - // independent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for a delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds at which the delay -// message is expected to be delivered to the consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that -// belong to the same message group are always processed one by one within the -// same consumer group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark the message besides the -// message ID. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions.
It represents the -// namespace of RocketMQ resources; resources in different namespaces are -// independent. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack ID for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack ID for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// This group describes attributes specific to Azure Service Bus. -const ( - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic that messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic that messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It describes more - // details regarding the connection.type. It may be the type of cell - // technology connection, but it could be used for describing details - // about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of - // the OCI image manifest. For container images specifically, this is the - // digest by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically, this is the digest by -// which the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Attributes used by the OpenTracing Shim layer. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. 
It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It specifies whether - // the context switches for this data point were voluntary or involuntary.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and - // time the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:25:34.853Z' - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the - // "process.exit.code" semantic conventions. It represents the exit code of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the - // "process.exit.time" semantic conventions. It represents the date and - // time the process exited, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:26:12.315Z' - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID - // of the process's group leader. This is also the process group ID (PGID) - // of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents whether - // the process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions.
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
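Before the remaining process.* keys: these keys pair with constructor helpers (declared further down in this hunk) that return ready-made `attribute.KeyValue`s. A minimal sketch of composing a few of them from the standard library, with the import path assumed:

```go
package example

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // semconv version assumed
)

// processAttrs gathers a few process.* attributes for the current process.
func processAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessCommandArgs(os.Args...),
	}
}
```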
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID - // of the process's session leader. This is also the session ID (SID) of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessUserIDKey is the attribute Key conforming to the - // "process.user.id" semantic conventions. It represents the effective user - // ID (EUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the - // "process.user.name" semantic conventions. It represents the username of - // the effective user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" - // semantic conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily - // unique across all processes on the host but it is unique within the - // process namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") -) - -var ( - // voluntary - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -var ( - // major - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and -// time the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time -// the process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of -// the process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. 
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. 
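A sketch of how the rpc.* attributes end up on a span; the enum values (`RPCSystemGRPC`, `RPCGRPCStatusCodeOk`) and constructor helpers used here are declared further down in this same hunk, and the service/method names come from the Examples in this file. The import path is an assumption:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // semconv version assumed
)

// grpcCallAttrs returns the attributes for a successful gRPC call to
// myservice.EchoService/exampleMethod.
func grpcCallAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.RPCSystemGRPC,
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("exampleMethod"),
		semconv.RPCGRPCStatusCodeOk,
	}
}
```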
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It MUST be calculated as two - // different counters starting from `1`, one for sent messages and one for - // received messages. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the - // "rpc.message.type" semantic conventions. It represents whether this - // is a received or sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the -// "rpc.message.id" semantic conventions. It MUST be calculated -// as two different counters starting from `1`, one for sent messages and one -// for received messages. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to -// the "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection).
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. 
Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace should be treated as confidential, - // being the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. For instance, - // creating an UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. 
-func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Attributes for telemetry SDK. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. 
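A sketch of how the service.* helpers defined earlier in this hunk are typically used to build an SDK resource; the telemetry.sdk.* attributes documented next are filled in by the SDK itself, not by hand. Import paths and attribute values (taken from the Examples in this file) are illustrative:

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // semconv version assumed
)

// newResource describes this service instance for all exported telemetry.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
	)
}
```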
- // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client.
This is usually mutually exclusive with - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of the DER-encoded version - // of the certificate offered by the client. For consistency with other - // hash values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of the DER-encoded version - // of the certificate offered by the client. For consistency with other - // hash values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of the DER-encoded - // version of the certificate offered by the client. For consistency with - // other hash values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/time - // indicating when the client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions.
It represents the - // date/time indicating when the client certificate is first considered - // valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the server - // name (SNI) requested by the client: the hostname to which the client is - // attempting to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from the original string of the - // negotiated [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions.
It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the server. This is - // usually mutually exclusive with `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually exclusive with - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of the DER-encoded version - // of the certificate offered by the server. For consistency with other - // hash values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of the DER-encoded version - // of the certificate offered by the server. For consistency with other - // hash values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of the DER-encoded - // version of the certificate offered by the server. For consistency with - // other hash values, this value should be formatted as an uppercase hash.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/time - // indicating when the server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/time indicating when the server certificate is first considered - // valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually exclusive with `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions.
It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually exclusive with `client.certificate` -// since that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of the DER-encoded version of the -// certificate offered by the client. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of the DER-encoded version of the -// certificate offered by the client. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of the DER-encoded version of the -// certificate offered by the client. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/time -// indicating when the client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/time -// indicating when the client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the server name -// (SNI) requested by the client: the hostname to which the client is -// attempting to connect.
-func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually exclusive with `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually exclusive with `server.certificate` -// since that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io".
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every URL has - // a file extension. When the file name has multiple extensions, as in - // `example.tar.gz`, only the last one should be captured (`gz`, not - // `tar.gz`). - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in the form of - // `https://username:password@www.example.com/`. In such a case, the - // username and password SHOULD be redacted and the attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in the form of - // `https://username:password@www.example.com/`. In such a case, the - // password and username SHOULD NOT be redacted and the attribute's value - // SHOULD remain the same.
- URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered URL domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name, which includes all of the names except the host - // name under the registered_domain. In a partially qualified domain, or if - // the qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, which is the - // last part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered URL domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions.
It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name, which includes all of the names except the -// host name under the registered_domain. In a partially qualified domain, or -// if the qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, which is the last part -// of the domain name. For example, the top level domain for example.com is -// `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version`. - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string.
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go deleted file mode 100644 index d031bbea7..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d56..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f485..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. 
- // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. It represents the time - // between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It is deprecated; - // use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It is deprecated; - // use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It is deprecated; - // use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It is deprecated; - // use `db.client.connection.max` instead.
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It is - // deprecated; use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It is deprecated; - // use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It is - // deprecated; use `db.client.connection.create_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It is - // deprecated; use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It is deprecated; - // use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
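A note on how name/unit/description triples like the `dns.lookup.duration` entry above are meant to be consumed: a minimal sketch against the public OpenTelemetry metrics API (`go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/metric`). The scope name `example/dns` and both helper functions are illustrative, not part of this diff or of the semconv package.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// newDNSLookupHistogram creates a histogram from the same triple the
// generated constants above encode, so every emitter of the metric agrees
// on its name, unit, and description.
func newDNSLookupHistogram() (metric.Float64Histogram, error) {
	meter := otel.Meter("example/dns") // illustrative instrumentation scope
	return meter.Float64Histogram(
		"dns.lookup.duration", // DNSLookupDurationName
		metric.WithUnit("s"),  // DNSLookupDurationUnit
		metric.WithDescription("Measures the time taken to perform a DNS lookup."), // DNSLookupDurationDescription
	)
}

// recordLookup records one observed DNS lookup latency, in seconds.
func recordLookup(ctx context.Context, h metric.Float64Histogram, seconds float64) {
	h.Record(ctx, seconds)
}

Creating the instrument from the shared constants (rather than repeating string literals at each call site) is what keeps the emitted metric stream consistent across instrumentations; the deprecated `db.client.connections.*` entries above exist precisely so old emitters can be migrated to the renamed `db.client.connection.*` metrics.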
- - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. 
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
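For updowncounter-shaped metrics such as `kestrel.active_connections`, the usual pattern is +1 on accept and -1 on close. A sketch reusing the imports from the previous example; the connection-handling hook is an assumption:

```go
func serveConnection(ctx context.Context, meter metric.Meter) error {
	active, err := meter.Int64UpDownCounter(
		semconv.KestrelActiveConnectionsName,
		metric.WithUnit(semconv.KestrelActiveConnectionsUnit),
		metric.WithDescription(semconv.KestrelActiveConnectionsDescription),
	)
	if err != nil {
		return err
	}
	active.Add(ctx, 1)        // connection became active
	defer active.Add(ctx, -1) // connection closed
	// ... handle the connection ...
	return nil
}
```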
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets).
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It measures the duration of the function's logic
- // execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It measures the duration of the function's
- // initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
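The FaaS counters are monotonic, so they only ever receive positive increments. A sketch (same imports as the first example) of counting a cold start:

```go
func recordColdStart(ctx context.Context, meter metric.Meter) error {
	cold, err := meter.Int64Counter(
		semconv.FaaSColdstartsName,
		metric.WithUnit(semconv.FaaSColdstartsUnit),
		metric.WithDescription(semconv.FaaSColdstartsDescription),
	)
	if err != nil {
		return err
	}
	cold.Add(ctx, 1) // one cold start observed
	return nil
}
```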
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
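HTTP duration histograms are usually dimensioned with request attributes at record time. A sketch that adds `go.opentelemetry.io/otel/attribute` to the earlier imports; the method and status values are assumptions about what the caller knows:

```go
func recordClientRequest(ctx context.Context, meter metric.Meter, elapsed time.Duration) error {
	dur, err := meter.Float64Histogram(
		semconv.HTTPClientRequestDurationName,
		metric.WithUnit(semconv.HTTPClientRequestDurationUnit),
		metric.WithDescription(semconv.HTTPClientRequestDurationDescription),
	)
	if err != nil {
		return err
	}
	dur.Record(ctx, elapsed.Seconds(), metric.WithAttributes(
		attribute.String("http.request.method", "GET"),
		attribute.Int("http.response.status_code", 200),
	))
	return nil
}
```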
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the CPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
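Sampled values like the JVM memory metrics map naturally onto observable (callback-based) instruments rather than synchronous ones. A sketch that uses Go's own runtime stats as a stand-in data source (add `runtime` to the imports):

```go
func observeMemoryUsed(meter metric.Meter) error {
	_, err := meter.Int64ObservableUpDownCounter(
		semconv.JvmMemoryUsedName,
		metric.WithUnit(semconv.JvmMemoryUsedUnit),
		metric.WithDescription(semconv.JvmMemoryUsedDescription),
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			var ms runtime.MemStats
			runtime.ReadMemStats(&ms)
			o.Observe(int64(ms.HeapAlloc)) // sampled on every collection cycle
			return nil
		}),
	)
	return err
}
```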
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It measures the
- // duration of the publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It measures the
- // duration of the receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It measures the
- // duration of the process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It measures the
- // number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It measures the
- // number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It measures the
- // number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
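The messaging duration metrics follow the same record-elapsed-seconds shape as the histograms above. A sketch wrapping an assumed `publish` callback:

```go
func publishTimed(ctx context.Context, meter metric.Meter, publish func() error) error {
	dur, err := meter.Float64Histogram(
		semconv.MessagingPublishDurationName,
		metric.WithUnit(semconv.MessagingPublishDurationUnit),
		metric.WithDescription(semconv.MessagingPublishDurationDescription),
	)
	if err != nil {
		return err
	}
	start := time.Now()
	err = publish()
	dur.Record(ctx, time.Since(start).Seconds())
	return err
}
```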
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
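`process.cpu.utilization` is a gauge, so it too fits an observable instrument; the platform-specific sampling function is an assumption here:

```go
func observeCPUUtilization(meter metric.Meter, sample func() float64) error {
	_, err := meter.Float64ObservableGauge(
		semconv.ProcessCPUUtilizationName,
		metric.WithUnit(semconv.ProcessCPUUtilizationUnit),
		metric.WithDescription(semconv.ProcessCPUUtilizationDescription),
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(sample()) // sample() would read /proc or an OS equivalent
			return nil
		}),
	)
	return err
}
```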
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It measures the duration of inbound RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It measures the size of
- // RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It measures the size of
- // RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It measures the
- // number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It measures the
- // number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It measures the duration of outbound RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It measures the size of
- // RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It measures the size of
- // RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It measures the
- // number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It measures the
- // number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It reports the current frequency of the CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It reports the number
- // of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
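Unlike the HTTP metrics above, the RPC durations keep the older `ms` unit, so the recorded value has to be converted accordingly. A sketch for the server side:

```go
func recordRPCServerDuration(ctx context.Context, meter metric.Meter, elapsed time.Duration) error {
	dur, err := meter.Float64Histogram(
		semconv.RPCServerDurationName,
		metric.WithUnit(semconv.RPCServerDurationUnit), // "ms", not "s"
		metric.WithDescription(semconv.RPCServerDurationDescription),
	)
	if err != nil {
		return err
	}
	dur.Record(ctx, float64(elapsed)/float64(time.Millisecond))
	return nil
}
```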
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It reports the number
- // of logical (virtual) processor cores created by the operating system to
- // manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the Unix swap or Windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or Windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
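For the metrics whose brief is undefined upstream there is no generated Description constant at all, so only the name and unit get wired up; a sketch:

```go
func diskIOCounter(meter metric.Meter) (metric.Int64Counter, error) {
	// system.disk.io has no Description constant in this package.
	return meter.Int64Counter(
		semconv.SystemDiskIoName,
		metric.WithUnit(semconv.SystemDiskIoUnit),
	)
}
```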
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
deleted file mode 100644
index 4c87c7adc..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare a
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
deleted file mode 100644
index 02b56115e..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Migration from v1.33.0 to v1.34.0
-
-The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
deleted file mode 100644
index fab06c975..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.34.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
new file mode 100644
index 000000000..248054789
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
@@ -0,0 +1,41 @@
+
+# Migration from v1.36.0 to v1.37.0
+
+The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 000000000..d795247f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
similarity index 89%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
index 5b5666257..b6b27498f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -3,7 +3,7 @@
 
 // Code generated from semantic convention specification. DO NOT EDIT.
 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
 
 import "go.opentelemetry.io/otel/attribute"
 
@@ -28,7 +28,8 @@ const (
 	// AndroidOSAPILevelKey is the attribute Key conforming to the
 	// "android.os.api_level" semantic conventions. It uniquely
 	// identifies the framework API revision offered by a version (`os.version`) of
-	// the android operating system. More information can be found [here].
+	// the android operating system. More information can be found in the
+	// [Android API levels documentation].
 	//
 	// Type: string
 	// RequirementLevel: Recommended
 	//
 	// Examples: "33", "32"
 	//
-	// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+	// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
 	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
 )
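The SchemaURL constant (deleted above for v1.26.0, and present in the new v1.37.0 package) is typically pinned onto an SDK resource so the backend knows which schema version the attributes follow. A sketch assuming the otel SDK resource package; the service name is a placeholder:

```go
import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("checkout"),
	)
}
```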
 
 // AndroidOSAPILevel returns an attribute KeyValue conforming to the
 // "android.os.api_level" semantic conventions. It uniquely
 // identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found [here].
+// the android operating system. More information can be found in the
+// [Android API levels documentation].
 //
-// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
 func AndroidOSAPILevel(val string) attribute.KeyValue {
 	return AndroidOSAPILevelKey.String(val)
 }
@@ -73,6 +75,18 @@ var (
 
 // Namespace: app
 const (
+	// AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+	// conventions. It represents the unique identifier for a particular build or
+	// compilation of the application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+	// "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+	AppBuildIDKey = attribute.Key("app.build_id")
+
 	// AppInstallationIDKey is the attribute Key conforming to the
 	// "app.installation.id" semantic conventions. It represents a unique identifier
 	// representing the installation of an application on a specific device.
@@ -106,16 +120,51 @@ const (
 	// - [App set ID].
 	// - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
 	//
-	// More information about Android identifier best practices can be found [here]
-	// .
+	// More information about Android identifier best practices can be found in the
+	// [Android user data IDs guide].
 	//
 	// [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
 	// [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
 	// [App set ID]: https://developer.android.com/identity/app-set-id
 	// [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
-	// [here]: https://developer.android.com/training/articles/user-data-ids
+	// [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
 	AppInstallationIDKey = attribute.Key("app.installation.id")
 
+	// AppJankFrameCountKey is the attribute Key conforming to the
+	// "app.jank.frame_count" semantic conventions. It represents a number of frame
+	// renders that experienced jank.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 9, 42
+	// Note: Depending on platform limitations, the value provided MAY be an
+	// approximation.
+	AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+	// AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+	// semantic conventions. It represents the time period, in seconds, for which
+	// this jank is being reported.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0, 5.0, 10.24
+	AppJankPeriodKey = attribute.Key("app.jank.period")
+
+	// AppJankThresholdKey is the attribute Key conforming to the
+	// "app.jank.threshold" semantic conventions. It represents the minimum
+	// rendering threshold for this jank, in seconds.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.016, 0.7, 1.024
+	AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
 	// AppScreenCoordinateXKey is the attribute Key conforming to the
 	// "app.screen.coordinate.x" semantic conventions. It represents the x
 	// (horizontal) coordinate of a screen coordinate, in screen pixels.
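The new app.jank.* keys come with typed helper functions, so call sites build `attribute.KeyValue`s directly. A sketch using the example values from the doc comments above:

```go
func jankAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AppBuildID("9f2b833506aa6973a92fde9733e6271f"),
		semconv.AppJankFrameCount(9),    // frames that missed the threshold
		semconv.AppJankPeriod(1.0),      // reporting window, in seconds
		semconv.AppJankThreshold(0.016), // ~60 FPS frame budget, in seconds
	}
}
```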
@@ -164,6 +213,13 @@ const (
 	AppWidgetNameKey = attribute.Key("app.widget.name")
 )
 
+// AppBuildID returns an attribute KeyValue conforming to the "app.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the application.
+func AppBuildID(val string) attribute.KeyValue {
+	return AppBuildIDKey.String(val)
+}
+
 // AppInstallationID returns an attribute KeyValue conforming to the
 // "app.installation.id" semantic conventions. It represents a unique identifier
 // representing the installation of an application on a specific device.
@@ -171,6 +227,27 @@ func AppInstallationID(val string) attribute.KeyValue {
 	return AppInstallationIDKey.String(val)
 }
 
+// AppJankFrameCount returns an attribute KeyValue conforming to the
+// "app.jank.frame_count" semantic conventions. It represents a number of frame
+// renders that experienced jank.
+func AppJankFrameCount(val int) attribute.KeyValue {
+	return AppJankFrameCountKey.Int(val)
+}
+
+// AppJankPeriod returns an attribute KeyValue conforming to the
+// "app.jank.period" semantic conventions. It represents the time period, in
+// seconds, for which this jank is being reported.
+func AppJankPeriod(val float64) attribute.KeyValue {
+	return AppJankPeriodKey.Float64(val)
+}
+
+// AppJankThreshold returns an attribute KeyValue conforming to the
+// "app.jank.threshold" semantic conventions. It represents the minimum rendering
+// threshold for this jank, in seconds.
+func AppJankThreshold(val float64) attribute.KeyValue {
+	return AppJankThresholdKey.Float64(val)
+}
+
 // AppScreenCoordinateX returns an attribute KeyValue conforming to the
 // "app.screen.coordinate.x" semantic conventions. It represents the x
 // (horizontal) coordinate of a screen coordinate, in screen pixels.
@@ -1525,59 +1602,14 @@ func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
 
 // Enum values for aws.ecs.launchtype
 var (
-	// ec2
+	// Amazon EC2
 	// Stability: development
 	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
-	// fargate
+	// Amazon Fargate
 	// Stability: development
 	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
 )
 
-// Namespace: az
-const (
-	// AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
-	// conventions. It represents the [Azure Resource Provider Namespace] as
-	// recognized by the client.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: Development
-	//
-	// Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
-	//
-	// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
-	AzNamespaceKey = attribute.Key("az.namespace")
-
-	// AzServiceRequestIDKey is the attribute Key conforming to the
-	// "az.service_request_id" semantic conventions. It represents the unique
-	// identifier of the service request. It's generated by the Azure service and
-	// returned with the response.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: Development
-	//
-	// Examples: "00000000-0000-0000-0000-000000000000"
-	AzServiceRequestIDKey = attribute.Key("az.service_request_id")
-)
-
-// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
-// semantic conventions. It represents the [Azure Resource Provider Namespace] as
-// recognized by the client.
-// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzNamespace(val string) attribute.KeyValue { - return AzNamespaceKey.String(val) -} - -// AzServiceRequestID returns an attribute KeyValue conforming to the -// "az.service_request_id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzServiceRequestID(val string) attribute.KeyValue { - return AzServiceRequestIDKey.String(val) -} - // Namespace: azure const ( // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" @@ -1665,6 +1697,31 @@ const ( // // Examples: 1000, 1002 AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") ) // AzureClientID returns an attribute KeyValue conforming to the @@ -1705,6 +1762,23 @@ func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { return AzureCosmosDBResponseSubStatusCodeKey.Int(val) } +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + // Enum values for azure.cosmosdb.connection.mode var ( // Gateway (HTTP) connection. 
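Because the az.* helpers above are removed outright rather than deprecated, call sites must switch to the azure.* replacements in the same change. A sketch of the one-to-one mapping, assuming the package is imported as semconv (import path is an assumption); the values are the documented examples:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

func main() {
	// Previously: semconv.AzNamespace(...) and semconv.AzServiceRequestID(...).
	// The renamed helpers carry the same string payloads.
	attrs := []attribute.KeyValue{
		semconv.AzureResourceProviderNamespace("Microsoft.Storage"),
		semconv.AzureServiceRequestID("00000000-0000-0000-0000-000000000000"),
	}
	fmt.Println(attrs)
}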
@@ -1717,19 +1791,19 @@ var ( // Enum values for azure.cosmosdb.consistency.level var ( - // strong + // Strong // Stability: development AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // bounded_staleness + // Bounded Staleness // Stability: development AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // session + // Session // Stability: development AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // eventual + // Eventual // Stability: development AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // consistent_prefix + // Consistent Prefix // Stability: development AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") ) @@ -1944,37 +2018,37 @@ func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { // Enum values for cassandra.consistency.level var ( - // all + // All // Stability: development CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // each_quorum + // Each Quorum // Stability: development CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // quorum + // Quorum // Stability: development CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // local_quorum + // Local Quorum // Stability: development CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // one + // One // Stability: development CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // two + // Two // Stability: development CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // three + // Three // Stability: development CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // local_one + // Local One // Stability: development CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // any + // Any // Stability: development CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // serial + // Serial // Stability: development CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // local_serial + // Local Serial // Stability: development CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") ) @@ -2527,7 +2601,7 @@ const ( // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id CloudResourceIDKey = attribute.Key("cloud.resource_id") ) @@ -2604,25 +2678,25 @@ var ( CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") // Azure Container Apps // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") // Azure Container Instances // 
Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") // Azure Kubernetes Service // Stability: development - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") // Azure Functions // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") // Azure App Service // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") // Azure Red Hat OpenShift // Stability: development - CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") // Google Bare Metal Solution (BMS) // Stability: development CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") @@ -3374,16 +3448,40 @@ const ( // Examples: "opentelemetry-autoconf" ContainerNameKey = attribute.Key("container.name") - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container runtime - // managing this container. + // ContainerRuntimeDescriptionKey is the attribute Key conforming to the + // "container.runtime.description" semantic conventions. It represents a + // description about the runtime which could include, for example, details about + // the CRI/API version being used or other customisations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker://19.3.1 - CRI: 1.22.0" + ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") + + // ContainerRuntimeNameKey is the attribute Key conforming to the + // "container.runtime.name" semantic conventions. It represents the container + // runtime managing this container. // // Type: string // RequirementLevel: Recommended // Stability: Development // // Examples: "docker", "containerd", "rkt" - ContainerRuntimeKey = attribute.Key("container.runtime") + ContainerRuntimeNameKey = attribute.Key("container.runtime.name") + + // ContainerRuntimeVersionKey is the attribute Key conforming to the + // "container.runtime.version" semantic conventions. It represents the version + // of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0.0 + ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") ) // ContainerCommand returns an attribute KeyValue conforming to the @@ -3467,6 +3565,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue { return ContainerImageTagsKey.StringSlice(val) } +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `<key>` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + // ContainerName returns an attribute KeyValue conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime.
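The container.runtime split and the container.label template helper shown above compose as plain attribute.KeyValue values; ContainerLabel builds the full container.label.<key> key at call time. A sketch under the same assumed import path, with an illustrative (hypothetical) version string:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.ContainerRuntimeName("containerd"),
		semconv.ContainerRuntimeDescription("docker://19.3.1 - CRI: 1.22.0"),
		semconv.ContainerRuntimeVersion("1.7.0"),       // hypothetical version
		semconv.ContainerLabel("app", "opentelemetry"), // key becomes container.label.app
	}
	fmt.Println(attrs)
}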
@@ -3474,11 +3579,26 @@ func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container runtime -// managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) +// ContainerRuntimeDescription returns an attribute KeyValue conforming to the +// "container.runtime.description" semantic conventions. It represents a +// description about the runtime which could include, for example, details about +// the CRI/API version being used or other customisations. +func ContainerRuntimeDescription(val string) attribute.KeyValue { + return ContainerRuntimeDescriptionKey.String(val) +} + +// ContainerRuntimeName returns an attribute KeyValue conforming to the +// "container.runtime.name" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntimeName(val string) attribute.KeyValue { + return ContainerRuntimeNameKey.String(val) +} + +// ContainerRuntimeVersion returns an attribute KeyValue conforming to the +// "container.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ContainerRuntimeVersion(val string) attribute.KeyValue { + return ContainerRuntimeVersionKey.String(val) } // Namespace: cpu @@ -3514,28 +3634,28 @@ func CPULogicalNumber(val int) attribute.KeyValue { // Enum values for cpu.mode var ( - // user + // User // Stability: development CPUModeUser = CPUModeKey.String("user") - // system + // System // Stability: development CPUModeSystem = CPUModeKey.String("system") - // nice + // Nice // Stability: development CPUModeNice = CPUModeKey.String("nice") - // idle + // Idle // Stability: development CPUModeIdle = CPUModeKey.String("idle") - // iowait + // IO Wait // Stability: development CPUModeIOWait = CPUModeKey.String("iowait") - // interrupt + // Interrupt // Stability: development CPUModeInterrupt = CPUModeKey.String("interrupt") - // steal + // Steal // Stability: development CPUModeSteal = CPUModeKey.String("steal") - // kernel + // Kernel // Stability: development CPUModeKernel = CPUModeKey.String("kernel") ) @@ -3794,6 +3914,22 @@ func DBOperationName(val string) attribute.KeyValue { return DBOperationNameKey.String(val) } +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `<key>` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `<key>` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + // DBQuerySummary returns an attribute KeyValue conforming to the // "db.query.summary" semantic conventions. It represents the low cardinality // summary of a database query. @@ -4194,8 +4330,8 @@ const ( // Hardware IDs (e.g.
vendor-specific serial number, IMEI or MAC address) MAY be // used as values. // - // More information about Android identifier best practices can be found [here] - // . + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. // // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution // > should be taken when storing personal data or anything which can identify a @@ -4210,7 +4346,7 @@ const ( // > opt-in feature.> See [`app.installation.id`]> for a more // > privacy-preserving alternative. // - // [here]: https://developer.android.com/training/articles/user-data-ids + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id DeviceIDKey = attribute.Key("device.id") @@ -4308,6 +4444,17 @@ var ( // Namespace: dns const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" // semantic conventions. It represents the name being queried. // @@ -4323,6 +4470,13 @@ const ( DNSQuestionNameKey = attribute.Key("dns.question.name") ) +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + // DNSQuestionName returns an attribute KeyValue conforming to the // "dns.question.name" semantic conventions. It represents the name being // queried. 
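Because dns.answers is typed string[], the DNSAnswers helper above is variadic and yields a single StringSlice attribute. A sketch pairing it with the existing dns.question.name helper, again under the assumed import path, with values taken from the documented examples:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.DNSQuestionName("www.example.com"),
		semconv.DNSAnswers("10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"),
	}
	fmt.Println(attrs)
}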
@@ -4941,7 +5095,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") @@ -4951,7 +5105,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "logo-color" FeatureFlagKeyKey = attribute.Key("feature_flag.key") @@ -4962,7 +5116,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "Flag Manager" FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") @@ -4973,7 +5127,7 @@ const ( // // Type: Enum // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "static", "targeting_match", "error", "default" FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") @@ -4984,7 +5138,7 @@ const ( // // Type: any // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "#ff0000", true, 3 // Note: With some feature flag providers, feature flag results can be quite @@ -5004,7 +5158,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "red", "true", "on" // Note: A semantic identifier, commonly referred to as a variant, provides a @@ -5020,7 +5174,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "proj-1", "ab98sgs", "service1/dev" // @@ -5034,7 +5188,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "1", "01ABCDEF" FeatureFlagVersionKey = attribute.Key("feature_flag.version") @@ -5088,34 +5242,34 @@ func FeatureFlagVersion(val string) attribute.KeyValue { // Enum values for feature_flag.result.reason var ( // The resolved value is static (no dynamic evaluation). - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") // The resolved value fell back to a pre-configured value (no dynamic evaluation // occurred or dynamic evaluation yielded no result). - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") // The resolved value was the result of a dynamic evaluation, such as a rule or // specific user-targeting. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") // The resolved value was the result of pseudorandom assignment. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") // The resolved value was retrieved from cache. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") // The resolved value was the result of the flag being disabled in the // management system. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") // The reason for the resolved value could not be determined. 
- // Stability: development + // Stability: release_candidate FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") // The resolved value is non-authoritative or possibly out of date - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") // The resolved value was the result of an error. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") ) @@ -5208,7 +5362,7 @@ const ( // RequirementLevel: Recommended // Stability: Development // - // Examples: "Zone.Identifer" + // Examples: "Zone.Identifier" // Note: On Linux, a resource fork is used to store additional data with a // filesystem object. A file always has at least one fork for the data portion, // and additional forks may exist. @@ -5863,39 +6017,41 @@ const ( // `db.*`, to further identify and describe the data source. GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") - // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.request.service_tier" semantic conventions. It represents the - // service tier requested. May be a specific tier, default, or auto. + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. // - // Type: Enum + // Type: any // RequirementLevel: Recommended // Stability: Development // - // Examples: "auto", "default" - GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier") - - // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.response.service_tier" semantic conventions. It represents the - // service tier used for the response. + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. // - // Type: string - // RequirementLevel: Recommended - // Stability: Development + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. // - // Examples: "scale", "default" - GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier") - - // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to - // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It - // represents a fingerprint to track any eventual change in the Generative AI - // environment. + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. 
// - // Type: string - // RequirementLevel: Recommended - // Stability: Development + // See [Recording content on attributes] + // section for more details. // - // Examples: "fp_44709d6fcb" - GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint") + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") // GenAIOperationNameKey is the attribute Key conforming to the // "gen_ai.operation.name" semantic conventions. It represents the name of the @@ -5913,6 +6069,44 @@ const ( // libraries SHOULD use applicable predefined value. GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. + // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + // GenAIOutputTypeKey is the attribute Key conforming to the // "gen_ai.output.type" semantic conventions. It represents the // content type requested by the client. @@ -5931,6 +6125,35 @@ const ( // `gen_ai.output.{type}.*` attributes. GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider.
+ // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + // GenAIRequestChoiceCountKey is the attribute Key conforming to the // "gen_ai.request.choice.count" semantic conventions. It represents the target // number of candidate completions to return. @@ -6088,31 +6311,44 @@ const ( // Examples: "gpt-4-0613" GenAIResponseModelKey = attribute.Key("gen_ai.response.model") - // GenAISystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as identified - // by the client or server instrumentation. + // GenAISystemInstructionsKey is the attribute Key conforming to the + // "gen_ai.system_instructions" semantic conventions. It represents the system + // message or instructions provided to the GenAI model separately from the chat + // history. // - // Type: Enum + // Type: any // RequirementLevel: Recommended // Stability: Development // - // Examples: openai - // Note: The `gen_ai.system` describes a family of GenAI models with specific - // model identified - // by `gen_ai.request.model` and `gen_ai.response.model` attributes. + // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet + // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": + // "text",\n "content": "You are a language translator."\n },\n {\n "type": + // "text",\n "content": "Your mission is to translate text in English to + // French."\n }\n]\n" + // Note: This attribute SHOULD be used when the corresponding provider or API + // allows providing system instructions or messages separately from the + // chat history. // - // The actual GenAI product may differ from the one identified by the client. - // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI - // client - // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge, instead of the actual system. The - // `server.address` - // attribute may help identify the actual system in use for `openai`. + // Instructions that are part of the chat history SHOULD be recorded in + // `gen_ai.input.messages` attribute instead. // - // For custom model, a custom friendly name SHOULD be used. - // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER` - // . - GenAISystemKey = attribute.Key("gen_ai.system") + // Instrumentations MUST follow [System instructions JSON schema]. + // + // When recorded on spans, it MAY be recorded as a JSON string if structured + // format is not supported and SHOULD be recorded in structured form otherwise.
+ // + // Instrumentations MAY provide a way for users to filter or truncate + // system instructions. + // + // > [!Warning] + // > This attribute may contain sensitive information. + // + // See [Recording content on attributes] + // section for more details. + // + // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" // semantic conventions. It represents the type of token being counted. @@ -6237,21 +6473,6 @@ func GenAIDataSourceID(val string) attribute.KeyValue { return GenAIDataSourceIDKey.String(val) } -// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the -// "gen_ai.openai.response.service_tier" semantic conventions. It represents the -// service tier used for the response. -func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { - return GenAIOpenAIResponseServiceTierKey.String(val) -} - -// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming -// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It -// represents a fingerprint to track any eventual change in the Generative AI -// environment. -func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { - return GenAIOpenAIResponseSystemFingerprintKey.String(val) -} - // GenAIRequestChoiceCount returns an attribute KeyValue conforming to the // "gen_ai.request.choice.count" semantic conventions. It represents the target // number of candidate completions to return. @@ -6393,16 +6614,6 @@ func GenAIUsageOutputTokens(val int) attribute.KeyValue { return GenAIUsageOutputTokensKey.Int(val) } -// Enum values for gen_ai.openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. - // Stability: development - GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") -) - // Enum values for gen_ai.operation.name var ( // Chat completion operation such as [OpenAI Chat API] @@ -6452,57 +6663,79 @@ var ( GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") ) -// Enum values for gen_ai.system +// Enum values for gen_ai.provider.name var ( - // OpenAI + // [OpenAI] // Stability: development - GenAISystemOpenAI = GenAISystemKey.String("openai") + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") // Any Google generative AI endpoint // Stability: development - GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") - // Vertex AI + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] // Stability: development - GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") - // Gemini + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] // Stability: development - GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") - // Deprecated: Use 'gcp.vertex_ai' instead. - GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") - // Deprecated: Use 'gcp.gemini' instead. 
- GenAISystemGemini = GenAISystemKey.String("gemini") - // Anthropic + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] // Stability: development - GenAISystemAnthropic = GenAISystemKey.String("anthropic") - // Cohere + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] // Stability: development - GenAISystemCohere = GenAISystemKey.String("cohere") + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") // Azure AI Inference // Stability: development - GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") - // Azure OpenAI + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] // Stability: development - GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") - // IBM Watsonx AI + // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] // Stability: development - GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") - // AWS Bedrock + // + // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] // Stability: development - GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") - // Perplexity + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] // Stability: development - GenAISystemPerplexity = GenAISystemKey.String("perplexity") - // xAI + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] // Stability: development - GenAISystemXai = GenAISystemKey.String("xai") - // DeepSeek + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] // Stability: development - GenAISystemDeepseek = GenAISystemKey.String("deepseek") - // Groq + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] // Stability: development - GenAISystemGroq = GenAISystemKey.String("groq") - // Mistral AI + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] // Stability: development - GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") ) // Enum values for gen_ai.token.type @@ -6510,8 +6743,6 @@ var ( // Input tokens (prompt, input, etc.) // Stability: development GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Deprecated: Replaced by `output`. - GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") // Output tokens (completion, response, etc.) // Stability: development GenAITokenTypeOutput = GenAITokenTypeKey.String("output") @@ -7312,6 +7543,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue { return HTTPRequestBodySizeKey.Int(val) } +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. 
It represents the HTTP request +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + // HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the // "http.request.method_original" semantic conventions. It represents the // original HTTP method sent by the client in the request line. @@ -7347,6 +7586,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue { return HTTPResponseBodySizeKey.Int(val) } +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. It represents the HTTP response +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + // HTTPResponseSize returns an attribute KeyValue conforming to the // "http.response.size" semantic conventions. It represents the total size of the // response in bytes. This should be the total number of bytes sent over the @@ -7418,64 +7665,352 @@ var ( // Namespace: hw const ( - // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. - // It represents an identifier for the hardware component, unique within the - // monitored host. + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watt-hours or Ampere-hours. // // Type: string // RequirementLevel: Recommended // Stability: Development // - // Examples: "win32battery_battery_testsysa33_1" - HwIDKey = attribute.Key("hw.id") + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") - // HwNameKey is the attribute Key conforming to the "hw.name" semantic - // conventions. It represents an easily-recognizable name for the hardware + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware // component. // // Type: string // RequirementLevel: Recommended // Stability: Development // - // Examples: "eth0" - HwNameKey = attribute.Key("hw.name") + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") - // HwParentKey is the attribute Key conforming to the "hw.parent" semantic - // conventions. It represents the unique identifier of the parent component - // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. // // Type: string // RequirementLevel: Recommended // Stability: Development // - // Examples: "dellStorage_perc_0" - HwParentKey = attribute.Key("hw.parent") + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") - // HwStateKey is the attribute Key conforming to the "hw.state" semantic - // conventions. It represents the current state of the component. + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). // - // Type: Enum + // Type: string // RequirementLevel: Recommended // Stability: Development // - // Examples: - HwStateKey = attribute.Key("hw.state") + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") - // HwTypeKey is the attribute Key conforming to the "hw.type" semantic - // conventions. It represents the type of the component. + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. // // Type: Enum // RequirementLevel: Recommended // Stability: Development // // Examples: - // Note: Describes the category of the hardware component for which `hw.state` + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` // is being reported. For example, `hw.type=temperature` along with // `hw.state=degraded` would indicate that the temperature of the hardware // component has been reported as `degraded`. HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") ) +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watt-hours or Ampere-hours. +func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component.
+func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + // HwID returns an attribute KeyValue conforming to the "hw.id" semantic // conventions. It represents an identifier for the hardware component, unique // within the monitored host. @@ -7483,6 +8018,26 @@ func HwID(val string) attribute.KeyValue { return HwIDKey.String(val) } +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + // HwName returns an attribute KeyValue conforming to the "hw.name" semantic // conventions. It represents an easily-recognizable name for the hardware // component. @@ -7490,6 +8045,20 @@ func HwName(val string) attribute.KeyValue { return HwNameKey.String(val) } +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + // HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic // conventions. It represents the unique identifier of the parent component // (typically the `hw.id` attribute of the enclosure, or disk controller). @@ -7497,17 +8066,144 @@ func HwParent(val string) attribute.KeyValue { return HwParentKey.String(val) } -// Enum values for hw.state +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] 
(Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. +func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state var ( - // Ok + // Charging // Stability: development - HwStateOk = HwStateKey.String("ok") + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: development + HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( // Degraded // Stability: development HwStateDegraded = HwStateKey.String("degraded") // Failed // Stability: development HwStateFailed = 
HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") ) // Enum values for hw.type @@ -7686,6 +8382,36 @@ const ( // Examples: "Evicted", "Error" K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" // semantic conventions. It represents the name of the CronJob. // @@ -7749,6 +8475,18 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic // conventions. It represents the name of the horizontal pod autoscaler. 
// @@ -7759,6 +8497,43 @@ const ( // Examples: "opentelemetry" K8SHPANameKey = attribute.Key("k8s.hpa.name") + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic // conventions. It represents the UID of the horizontal pod autoscaler. // @@ -7769,6 +8544,17 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic // conventions. It represents the name of the Job. // @@ -7815,6 +8601,46 @@ const ( // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. + // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // @@ -7910,6 +8736,25 @@ const ( // Examples: "opentelemetry" K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/[.]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + // K8SResourceQuotaUIDKey is the attribute Key conforming to the // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the // resource quota. @@ -7943,6 +8788,19 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" // semantic conventions. It represents the name of the K8s volume. // @@ -8001,6 +8859,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { return K8SContainerStatusLastTerminatedReasonKey.String(val) } +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `` being the annotation name, the +// value being the annotation value. 
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. @@ -8014,6 +8888,22 @@ func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label placed on +// the DaemonSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. @@ -8028,6 +8918,22 @@ func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. @@ -8042,18 +8948,69 @@ func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + // K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" // semantic conventions. It represents the name of the horizontal pod autoscaler. 
func K8SHPAName(val string) attribute.KeyValue { return K8SHPANameKey.String(val) } +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + // K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" // semantic conventions. It represents the UID of the horizontal pod autoscaler. func K8SHPAUID(val string) attribute.KeyValue { return K8SHPAUIDKey.String(val) } +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { @@ -8066,6 +9023,22 @@ func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `` being the label name, the value being the label +// value, even if the value is empty. 
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. @@ -8073,6 +9046,22 @@ func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + // K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { @@ -8085,6 +9074,21 @@ func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `` +// being the label name, the value being the label value. +func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { @@ -8097,6 +9101,22 @@ func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// placed on the ReplicaSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label placed on +// the ReplicaSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. 
@@ -8132,6 +9152,13 @@ func K8SResourceQuotaName(val string) attribute.KeyValue { return K8SResourceQuotaNameKey.String(val) } +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + // K8SResourceQuotaUID returns an attribute KeyValue conforming to the // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the // resource quota. @@ -8139,6 +9166,22 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue { return K8SResourceQuotaUIDKey.String(val) } +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `` being the annotation name, +// the value being the annotation value, even if the value is empty. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. It represents the label placed +// on the StatefulSet, the `` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. @@ -8153,6 +9196,15 @@ func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + // K8SVolumeName returns an attribute KeyValue conforming to the // "k8s.volume.name" semantic conventions. It represents the name of the K8s // volume. @@ -8160,6 +9212,50 @@ func K8SVolumeName(val string) attribute.KeyValue { return K8SVolumeNameKey.String(val) } +// Enum values for k8s.container.status.reason +var ( + // The container is being created. + // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. + // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. 
+ // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. + // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + // Enum values for k8s.namespace.phase var ( // Active namespace phase as described by [K8s API] @@ -8174,6 +9270,39 @@ var ( K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") ) +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + // Enum values for k8s.volume.type var ( // A [persistentVolumeClaim] volume @@ -8371,6 +9500,27 @@ var ( LogIostreamStderr = LogIostreamKey.String("stderr") ) +// Namespace: mainframe +const ( + // MainframeLparNameKey is the attribute Key conforming to the + // "mainframe.lpar.name" semantic conventions. It represents the name of the + // logical partition that hosts a systems with a mainframe operating system. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "LPAR01" + MainframeLparNameKey = attribute.Key("mainframe.lpar.name") +) + +// MainframeLparName returns an attribute KeyValue conforming to the +// "mainframe.lpar.name" semantic conventions. It represents the name of the +// logical partition that hosts a systems with a mainframe operating system. +func MainframeLparName(val string) attribute.KeyValue { + return MainframeLparNameKey.String(val) +} + // Namespace: messaging const ( // MessagingBatchMessageCountKey is the attribute Key conforming to the @@ -9084,10 +10234,6 @@ var ( // // Stability: development MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") - // Deprecated: Replaced by `process`. - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver") - // Deprecated: Replaced by `send`. - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") ) // Enum values for messaging.rocketmq.consumption_model @@ -9137,6 +10283,9 @@ var ( // Apache ActiveMQ // Stability: development MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") // Amazon Simple Queue Service (SQS) // Stability: development MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") @@ -9654,6 +10803,66 @@ func OCIManifestDigest(val string) attribute.KeyValue { return OCIManifestDigestKey.String(val) } +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. 
+func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. + // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + // Namespace: opentracing const ( // OpenTracingRefTypeKey is the attribute Key conforming to the @@ -9802,7 +11011,7 @@ var ( OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS // Stability: development - OSTypeZOS = OSTypeKey.String("z_os") + OSTypeZOS = OSTypeKey.String("zos") ) // Namespace: otel @@ -9866,6 +11075,17 @@ const ( // Examples: "io.opentelemetry.contrib.mongodb" OTelScopeNameKey = attribute.Key("otel.scope.name") + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). @@ -9877,6 +11097,20 @@ const ( // Examples: "1.0.0" OTelScopeVersionKey = attribute.Key("otel.scope.version") + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + // OTelSpanSamplingResultKey is the attribute Key conforming to the // "otel.span.sampling_result" semantic conventions. It represents the result // value of the sampler for this span. @@ -9926,6 +11160,13 @@ func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). 
@@ -9970,6 +11211,10 @@ var ( // // Stability: development OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") // OTLP log record exporter over gRPC with protobuf serialization // // Stability: development @@ -9998,6 +11243,27 @@ var ( // // Stability: development OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") ) // Enum values for otel.span.sampling_result @@ -10497,6 +11763,14 @@ func ProcessCreationTime(val string) attribute.KeyValue { return ProcessCreationTimeKey.String(val) } +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + // ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the // "process.executable.build_id.gnu" semantic conventions. It represents the GNU // build ID as found in the `.note.gnu.build-id` ELF section (hex string). @@ -10965,6 +12239,38 @@ const ( RPCSystemKey = attribute.Key("rpc.system") ) +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. 
It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + // RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` // property of response if it is an error response. @@ -11820,15 +13126,12 @@ var ( // Enum values for system.memory.state var ( - // used + // Actual used virtual memory in bytes. // Stability: development SystemMemoryStateUsed = SystemMemoryStateKey.String("used") // free // Stability: development SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // Deprecated: Removed, report shared memory usage with - // `metric.system.memory.shared` metric. - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") // buffers // Stability: development SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") @@ -13727,8 +15030,6 @@ var ( // // [GitLab]: https://gitlab.com VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") - // Deprecated: Replaced by `gitea`. - VCSProviderNameGittea = VCSProviderNameKey.String("gittea") // [Gitea] // Stability: development // @@ -13848,4 +15149,45 @@ func WebEngineName(val string) attribute.KeyValue { // engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) +} + +// Namespace: zos +const ( + // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic + // conventions. It represents the System Management Facility (SMF) Identifier + // uniquely identified a z/OS system within a SYSPLEX or mainframe environment + // and is used for system and performance analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYS1" + ZOSSmfIDKey = attribute.Key("zos.smf.id") + + // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" + // semantic conventions. It represents the name of the SYSPLEX to which the z/OS + // system belongs too. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYSPLEX1" + ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") +) + +// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic +// conventions. It represents the System Management Facility (SMF) Identifier +// uniquely identified a z/OS system within a SYSPLEX or mainframe environment +// and is used for system and performance analysis. +func ZOSSmfID(val string) attribute.KeyValue { + return ZOSSmfIDKey.String(val) +} + +// ZOSSysplexName returns an attribute KeyValue conforming to the +// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX +// to which the z/OS system belongs too. 
+func ZOSSysplexName(val string) attribute.KeyValue { + return ZOSSysplexNameKey.String(val) } \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go index 2c5c7ebd0..111010321 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.34.0 +// patterns for OpenTelemetry things. This package represents the v1.37.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go new file mode 100644 index 000000000..267979c05 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. + + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." + name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. 
+ s = t.String() + } + } + return s +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go index 88a998f1e..e67469a4f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go index 3c23d4592..f8a0b7044 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.34.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index f3aa39813..8763936a8 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = autoTracerProvider{} -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { cfg := NewTracerConfig(opts...) return autoTracer{ name: name, @@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt // Expected to be implemented in eBPF. // //go:noinline -func (t *autoTracer) start( +func (*autoTracer) start( ctx context.Context, spanPtr *autoSpan, psc *SpanContext, diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 9c0b720a4..d9ecef1ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -73,7 +74,7 @@ func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } @@ -154,7 +155,7 @@ func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. 
+// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go new file mode 100644 index 000000000..1cbef1d4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/hex.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +const ( + // hexLU is a hex lookup table of the 16 lowercase hex digits. + // The character values of the string are indexed at the equivalent + // hexadecimal value they represent. This table efficiently encodes byte data + // into a string representation of hexadecimal. + hexLU = "0123456789abcdef" + + // hexRev is a reverse hex lookup table for lowercase hex digits. + // The table is efficiently decodes a hexadecimal string into bytes. + // Valid hexadecimal characters are indexed at their respective values. All + // other invalid ASCII characters are represented with '\xff'. + // + // The '\xff' character is used as invalid because no valid character has + // the upper 4 bits set. Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. 
+ hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go index f663547b4..ff0f6eac6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr { return Attr{key, MapValue(value...)} } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a Attr) Equal(b Attr) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go index 7b1ae3c4e..bea56f2e7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -22,7 +22,7 @@ func (tid TraceID) String() string { return hex.EncodeToString(tid[:]) } -// IsEmpty returns false if id contains at least one non-zero byte. +// IsEmpty reports whether the TraceID contains only zero bytes. func (tid TraceID) IsEmpty() bool { return tid == [traceIDSize]byte{} } @@ -50,7 +50,7 @@ func (sid SpanID) String() string { return hex.EncodeToString(sid[:]) } -// IsEmpty returns true if the span ID contains at least one non-zero byte. +// IsEmpty reports whether the SpanID contains only zero bytes. func (sid SpanID) IsEmpty() bool { return sid == [spanIDSize]byte{} } @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index ae9ce102a..cb7927b81 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. 
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 0f56e4dbb..400fab123 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 64a4f1b36..689d220df 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. -func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476ee..d01e79366 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. + // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttribute later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index d49adf671..ee6f4bcb2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. 
func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. +func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. 
+ if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID + } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID } - return t, nil + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. // See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + return [8]byte{}, errInvalidSpanIDLength } - - if !s.IsValid() { - return s, errNilSpanID + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. 
func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index dc5e34cad..073adae2f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7afe92b59..0d5b02918 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.37.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9d4742a17..f4a3893eb 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.37.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.59.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.13.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,9 +36,28 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index 2346df135..74faaa71d 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -25,7 +25,7 @@ linters-settings: govet: # These govet checks are disabled by default, but they're useful. enable: - - niliness + - nilness - reflectvaluecompare - sortslice - unusedwrite diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 6d6cd5f4d..86e7e6f98 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,6 +3,16 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.1 (19 Nov 2025) +Enhancements: +* [#1501][]: prevent `Object` from panicking on nils +* [#1511][]: Fix a race condition in `WithLazy`. + +Thanks to @rabbbit, @alshopov, @jquirke, @arukiidou for their contributions to this release. + +[#1501]: https://github.com/uber-go/zap/pull/1501 +[#1511]: https://github.com/uber-go/zap/pull/1511 + ## 1.27.0 (20 Feb 2024) Enhancements: * [#1378][]: Add `WithLazy` method for `SugaredLogger`. diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md index e327d9aa5..bc988b72e 100644 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -71,5 +71,5 @@ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]. 
-[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ +[homepage]: https://contributor-covenant.org +[version]: https://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/LICENSE b/vendor/go.uber.org/zap/LICENSE index 6652bed45..3883b9a7e 100644 --- a/vendor/go.uber.org/zap/LICENSE +++ b/vendor/go.uber.org/zap/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2016-2024 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index eb1cee53b..f9db385b3 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -24,7 +24,7 @@ golangci-lint: @$(foreach mod,$(MODULE_DIRS), \ (cd $(mod) && \ echo "[lint] golangci-lint: $(mod)" && \ - golangci-lint run --path-prefix $(mod)) &&) true + golangci-lint run --path-prefix $(mod) ./...) &&) true .PHONY: tidy tidy: diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 6743930b8..1884afabc 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -398,6 +398,9 @@ func Durationp(key string, val *time.Duration) Field { // struct-like user-defined types to the logging context. The struct's // MarshalLogObject method is called lazily. func Object(key string, val zapcore.ObjectMarshaler) Field { + if val == nil { + return nilField(key) + } return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } @@ -431,6 +434,13 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { return nil } +// DictObject constructs a [zapcore.ObjectMarshaler] with the given list of fields. +// The resulting object marshaler can be used as input to [Object], [Objects], or +// any other functions that expect an object marshaler. +func DictObject(val ...Field) zapcore.ObjectMarshaler { + return dictObject(val) +} + // We discovered an issue where zap.Any can cause a performance degradation // when used in new goroutines. 
// diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 2be8f6515..1cae2c164 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -71,7 +71,7 @@ import ( func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := lvl.serveHTTP(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "internal error: %v", err) + _, _ = fmt.Fprintf(w, "internal error: %v", err) } } diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index c4d300323..2d0ef141b 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -381,7 +381,11 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if stack.Count() == 0 { if log.addCaller { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _, _ = fmt.Fprintf( + log.errorOutput, + "%v Logger.check error: failed to get caller\n", + ent.Time.UTC(), + ) _ = log.errorOutput.Sync() } return ce diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 43d357ac9..04a3c1e63 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -125,7 +125,11 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { return optionFunc(func(log *Logger) { core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) if err != nil { - fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + _, _ = fmt.Fprintf( + log.errorOutput, + "failed to IncreaseLevel: %v\n", + err, + ) } else { log.core = core } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index 499772a00..92202280f 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -71,7 +71,7 @@ func newSinkRegistry() *sinkRegistry { return sr } -// RegisterScheme registers the given factory for the specific scheme. +// RegisterSink registers the given factory for the specific scheme. func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { sr.mu.Lock() defer sr.mu.Unlock() diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index a40e93b3e..4b426a564 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -188,32 +188,33 @@ func (s *BufferedWriteSyncer) flushLoop() { // Stop closes the buffer, cleans up background goroutines, and flushes // remaining unwritten data. func (s *BufferedWriteSyncer) Stop() (err error) { - var stopped bool - // Critical section. - func() { + stopped := func() bool { s.mu.Lock() defer s.mu.Unlock() if !s.initialized { - return + return false } - stopped = s.stopped - if stopped { - return + if s.stopped { + return false } s.stopped = true s.ticker.Stop() close(s.stop) // tell flushLoop to stop - <-s.done // and wait until it has + return true }() - // Don't call Sync on consecutive Stops. + // Not initialized, or already stopped, no need for any cleanup. if !stopped { - err = s.Sync() + return } - return err + // Wait for flushLoop to end outside of the lock, as it may need the lock to complete. + // See https://github.com/uber-go/zap/issues/1428 for details. 
+ <-s.done + + return s.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index cc2b4e07b..98eea5154 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -105,7 +105,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, if i > 0 { line.AppendString(c.ConsoleSeparator) } - fmt.Fprint(line, arr.elems[i]) + _, _ = fmt.Fprint(line, arr.elems[i]) } putSliceEncoder(arr) diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 459a5d7ce..841752f2e 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -241,7 +241,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v Unsafe CheckedEntry re-use near Entry %+v.\n", + ce.Time, + ce.Entry, + ) _ = ce.ErrorOutput.Sync() // ignore error } return @@ -253,7 +258,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } if err != nil && ce.ErrorOutput != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v write error: %v\n", + ce.Time, + err, + ) _ = ce.ErrorOutput.Sync() // ignore error } diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go index 05288d6a8..500809de0 100644 --- a/vendor/go.uber.org/zap/zapcore/lazy_with.go +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -23,7 +23,8 @@ package zapcore import "sync" type lazyWithCore struct { - Core + core Core + originalCore Core sync.Once fields []Field } @@ -32,23 +33,45 @@ type lazyWithCore struct { // the logger is written to (or is further chained in a lon-lazy manner). func NewLazyWith(core Core, fields []Field) Core { return &lazyWithCore{ - Core: core, - fields: fields, + core: nil, // core is allocated once `initOnce` is called. + originalCore: core, + fields: fields, } } func (d *lazyWithCore) initOnce() { d.Once.Do(func() { - d.Core = d.Core.With(d.fields) + d.core = d.originalCore.With(d.fields) }) } func (d *lazyWithCore) With(fields []Field) Core { d.initOnce() - return d.Core.With(fields) + return d.core.With(fields) } func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + // This is safe because `lazyWithCore` doesn't change the level. + // So we can delagate the level check, any not `initOnce` + // just for the check. + if !d.originalCore.Enabled(e.Level) { + return ce + } + d.initOnce() + return d.core.Check(e, ce) +} + +func (d *lazyWithCore) Enabled(level Level) bool { + // Like above, this is safe because `lazyWithCore` doesn't change the level. 
+ return d.originalCore.Enabled(level) +} + +func (d *lazyWithCore) Write(e Entry, fields []Field) error { + d.initOnce() + return d.core.Write(e, fields) +} + +func (d *lazyWithCore) Sync() error { d.initOnce() - return d.Core.Check(e, ce) + return d.core.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e01a24131..f3e166d67 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -179,19 +179,19 @@ func (l *Level) UnmarshalText(text []byte) error { func (l *Level) unmarshalText(text []byte) bool { switch string(text) { - case "debug", "DEBUG": + case "debug": *l = DebugLevel - case "info", "INFO", "": // make the zero value useful + case "info", "": // make the zero value useful *l = InfoLevel - case "warn", "WARN": + case "warn", "warning": *l = WarnLevel - case "error", "ERROR": + case "error": *l = ErrorLevel - case "dpanic", "DPANIC": + case "dpanic": *l = DPanicLevel - case "panic", "PANIC": + case "panic": *l = PanicLevel - case "fatal", "FATAL": + case "fatal": *l = FatalLevel default: return false diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 1965913e5..ccb87e6da 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -376,11 +376,24 @@ type ClientConn struct { // completely unresponsive connection. pendingResets int + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
@@ -710,7 +723,7 @@ func canRetryError(err error) bool { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse) + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -720,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -772,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { conf := configFromTransport(t) cc := &ClientConn{ t: t, @@ -797,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), lastActive: time.Now(), + internalStateHook: internalStateHook, } if t.transportTestHooks != nil { t.transportTestHooks.newclientconn(cc) @@ -1037,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - !cc.doNotReuse && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). @@ -1056,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. +// +// This follows slightly different rules than clientConnIdleState.canTakeNewRequest. +// We only permit reservations up to the conn's concurrency limit. +// This differs from ClientConn.ReserveNewRequest, which permits reservations +// past the limit when StrictMaxConcurrentStreams is set. +func (cc *ClientConn) canReserveLocked() bool { + if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) { + return false + } + if !cc.isUsableLocked() { + return false + } + return true +} + // currentRequestCountLocked reports the number of concurrency slots currently in use, // including active streams, reserved slots, and reset streams waiting for acknowledgement. func (cc *ClientConn) currentRequestCountLocked() int { @@ -1067,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } +// availableLocked reports the number of concurrency slots available. 
+func (cc *ClientConn) availableLocked() int { + if !cc.canTakeNewRequestLocked() { + return 0 + } + return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked()) +} + // tooIdleLocked reports whether this connection has been been sitting idle // for too much wall time. func (cc *ClientConn) tooIdleLocked() bool { @@ -1091,6 +1135,7 @@ func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() cc.tconn.Close() + cc.maybeCallStateHook() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -1616,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } bodyClosed := cs.reqBodyClosed closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + // Have we read any frames from the connection since sending this request? + readSinceStream := cc.readBeforeStreamID > cs.ID cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1647,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // // This could be due to the server becoming unresponsive. // To avoid sending too many requests on a dead connection, - // we let the request continue to consume a concurrency slot - // until we can confirm the server is still responding. + // if we haven't read any frames from the connection since + // sending this request, we let it continue to consume + // a concurrency slot until we can confirm the server is + // still responding. // We do this by sending a PING frame along with the RST_STREAM // (unless a ping is already in flight). // @@ -1659,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // because it's short lived and will probably be closed before // we get the ping response. ping := false - if !closeOnIdle { + if !closeOnIdle && !readSinceStream { cc.mu.Lock() // rstStreamPingsBlocked works around a gRPC behavior: // see comment on the field for details. @@ -1693,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } close(cs.donec) + cc.maybeCallStateHook() } // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. @@ -2745,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt // See comment on ClientConn.rstStreamPingsBlocked for details. rl.cc.rstStreamPingsBlocked = false } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2795,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -2975,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3198,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. 
// (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it. +// This lets us expose, for example, NewClientConn to net/http. type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3211,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists. 
+ if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying diff --git a/vendor/modules.txt b/vendor/modules.txt index 21766be42..076e9a630 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1661,8 +1661,8 @@ github.com/inconshreveable/mousetrap # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/cpuid/v2 v2.2.9 -## explicit; go 1.20 +# github.com/klauspost/cpuid/v2 v2.2.10 +## explicit; go 1.22 github.com/klauspost/cpuid/v2 # github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 ## explicit; go 1.22.0 @@ -1770,16 +1770,22 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/redis/go-redis/v9 v9.6.3 +# github.com/redis/go-redis/v9 v9.17.2 ## explicit; go 1.18 github.com/redis/go-redis/v9 +github.com/redis/go-redis/v9/auth github.com/redis/go-redis/v9/internal +github.com/redis/go-redis/v9/internal/auth/streaming github.com/redis/go-redis/v9/internal/hashtag github.com/redis/go-redis/v9/internal/hscan +github.com/redis/go-redis/v9/internal/interfaces +github.com/redis/go-redis/v9/internal/maintnotifications/logs github.com/redis/go-redis/v9/internal/pool github.com/redis/go-redis/v9/internal/proto github.com/redis/go-redis/v9/internal/rand github.com/redis/go-redis/v9/internal/util +github.com/redis/go-redis/v9/maintnotifications +github.com/redis/go-redis/v9/push # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -1809,28 +1815,28 @@ github.com/yudai/golcs # github.com/zeebo/xxh3 v1.0.2 ## explicit; go 1.17 github.com/zeebo/xxh3 -# go.opentelemetry.io/auto/sdk v1.1.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/auto/sdk v1.2.1 +## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/otel v1.37.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute/internal +go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/v1.26.0 -go.opentelemetry.io/otel/semconv/v1.34.0 -# go.opentelemetry.io/otel/metric v1.37.0 -## explicit; go 1.23.0 +go.opentelemetry.io/otel/semconv/v1.37.0 +# go.opentelemetry.io/otel/metric v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.37.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/trace v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry @@ -1838,7 +1844,7 @@ 
go.opentelemetry.io/otel/trace/noop # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.27.0 +# go.uber.org/zap v1.27.1 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -1859,7 +1865,7 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/net v0.47.0 +# golang.org/x/net v0.48.0 ## explicit; go 1.24.0 golang.org/x/net/html golang.org/x/net/html/atom @@ -2578,7 +2584,7 @@ kmodules.xyz/prober/api/v1 kmodules.xyz/resource-metadata/apis/node kmodules.xyz/resource-metadata/apis/node/v1alpha1 kmodules.xyz/resource-metadata/crds -# kubedb.dev/apimachinery v0.60.0-rc.0.0.20251227140622-3fb97b2591c2 +# kubedb.dev/apimachinery v0.60.0-rc.1 ## explicit; go 1.25.0 kubedb.dev/apimachinery/apis kubedb.dev/apimachinery/apis/archiver/v1alpha1 @@ -2624,7 +2630,7 @@ kubedb.dev/apimachinery/crds kubedb.dev/apimachinery/pkg/double_optin kubedb.dev/apimachinery/pkg/factory kubedb.dev/apimachinery/pkg/lib -# kubedb.dev/db-client-go v0.15.0-rc.0 +# kubedb.dev/db-client-go v0.15.0-rc.1 ## explicit; go 1.25.5 kubedb.dev/db-client-go/elasticsearch kubedb.dev/db-client-go/redis